input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
from config.utils import *
from collections import defaultdict
import config.anchor as anchor
import time
class MatchEngine(object):
"""A class that handles the book-keeping for the matching process.
Attributes
----------
logger (logger): elementals logger instance
disas (disassembler): disassembler layer handler
src_functions_ctx (list): ordered list of source function contexts
bin_functions_ctx (dict): mapping of binary address to binary context: bin ea => bin function ctx
function_matches (dict): mapping of all (non-external) matches: src index => bin ea
_floating_bin_functions (list): list of all of the binary function contexts in the total range of the floating files
_floating_files (list): ordered list of currently floating files (FileMatch instances)
_src_unused_functions (set): set of (src) indices for unused functions (disabled functions)
_src_functions_list (list): ordered list of source function names
_src_file_mappings (dict): mapping of file name => list of source function contexts
_match_files (list): list of FileMatch instances, one for every source file
_src_file_names (list): list of active (non-empty) source file names
_bin_matched_ea (dict): reverse mapping for all matches: bin ea => src index / external ctx
_matched_anchors_ea (dict): matching mapping for the anchor functions: src index ==> bin ea
_src_anchor_list (list): list of source indices for the matched anchor functions
_bin_anchor_list (list): sorted list of binary indices of the matched anchor functions
"""
def __init__(self, logger, disas):
"""Create the basic instance.
Args:
logger (logger): elementals logger instance
disas (disassembler): disassembler layer handler
"""
self.logger = logger
self.disas = disas
self.function_matches = {}
self.src_functions_ctx = []
self.bin_functions_ctx = {}
self._floating_bin_functions = None
self._floating_files = []
self._src_unused_functions = set()
self._src_functions_list = []
self._src_file_mappings = {}
self._match_files = []
self._src_file_names = []
self._bin_matched_ea = {}
self._matched_anchors_ea = {}
self._src_anchor_list = []
self._bin_anchor_list = []
def binMatched(self, ea):
"""Check if the given effective address was already matched.
Return Value:
True if ea was matched to some source function
"""
return ea in self._bin_matched_ea
def matchedSrcIndices(self):
"""Return a list of all indices of matched source functions.
Return Value:
list of source indices for matched functions
"""
return self.function_matches.keys()
    def floatingBinFunctions(self):
        """Return an ordered list of all scoped (floating) binary contexts.

        Return Value:
            list of all floating binary contexts (None until the floating scope is set up)
        """
        return self._floating_bin_functions
def floatingRepresentative(self):
"""Return the FileMatch instance that represents all of the floating files.
Return Value:
representative floating file instance, or None if there is no such file
"""
if len(self._floating_files) > 0:
return self._floating_files[0]
else:
return None
def nextFloatingRepresentative(self):
"""Return a FileMatch instance that is next-in-line to represent the floating files.
Return Value:
next-representative floating file instance, or None if there is no such file
"""
if len(self._floating_files) > 1:
return self._floating_files[1]
else:
return None
def markUnused(self, src_indices):
"""Mark a collection of source functions as unused (ifdeffed / inlined), based on their source indices.
Args:
src_indices (collection): collection of source indices of the (now) unused functions
"""
self._src_unused_functions.update(src_indices)
for src_index in src_indices:
self.src_functions_ctx[src_index].disable()
def shrinkFloatingBinFunctions(self, lower_cut, upper_cut):
"""Shrink the focused scope of binary functions used for representing the overall floating file.
Args:
lower_cut (int): number of functions to be removed from the lower end
upper_cut (int): number of functions to be removed from the upper end
"""
# No need to actually expel() these, as we just purged them from our working set.
# expelled_funcs = floating_bin_functions[ : lower_cut] + floating_bin_functions[-1 * upper_cut : ]
# This line means we simply won't see them anymore and that's it
if upper_cut == 0:
self._floating_bin_functions = self._floating_bin_functions[lower_cut:]
elif lower_cut == 0:
self._floating_bin_functions = self._floating_bin_functions[: -1 * upper_cut]
else:
self._floating_bin_functions = self._floating_bin_functions[lower_cut: -1 * upper_cut]
# update the floating representative file
floating_representative = self.floatingRepresentative()
if upper_cut != 0:
floating_representative._upper_leftovers -= upper_cut
floating_representative._bin_limit_upper -= upper_cut
elif lower_cut != 0:
floating_representative._lower_leftovers -= lower_cut
floating_representative._bin_limit_lower += lower_cut
    def locatedFile(self, file_match):
        """Mark a given file as "located", i.e. not floating any more.

        Raises ValueError if file_match is not currently in the floating list.

        Args:
            file_match (FileMatch): source file that was now pinned to a given place in the memory space
        """
        self._floating_files.remove(file_match)
def loadAndMatchAnchors(self, anchors_config, manual_anchors_config):
"""Load the list of anchor functions, and try to match them with the binary.
Args:
anchors_config (list): list of anchor src indices
manual_anchors_config (list): list of user defined matches (Manual Anchors): (src index, bin_ea)
"""
# Parse the anchors file
self.logger.info("Loading the list of Anchor functions")
self._src_anchor_list = anchors_config
# Locate the anchor functions
self.logger.info("Searching for the Anchor functions in the binary")
self.logger.addIndent()
all_bin_functions = self.disas.functions()
# range narrowing variables
lower_match_ea = None
upper_match_ea = None
lower_match_index = None
upper_match_index = None
lower_border_ea = 0
upper_border_ea = 2 ** 64 - 1
lower_border_index = None
upper_border_index = None
function_range = None
overall_num_functions = len(self._src_functions_list)
multiple_option_candidates = []
anchor_eas = []
first_const_anchor = True
efficient_const_search = False
# pre-scan (for optimization reasons)
anchor_stats = []
num_const_clues = 0
all_const_clues = set()
all_string_clues = set()
seen_strings, seen_consts, function_list = getContextsStats()
for src_anchor_index in list(self._src_anchor_list):
src_func_ctx = self.src_functions_ctx[src_anchor_index]
is_str, threshold, anchor_clues = anchor.isAnchor(src_func_ctx, seen_strings, seen_consts, function_list, self.logger)
# sanity check
if anchor_clues is None:
self._src_anchor_list.remove(src_anchor_index)
self.logger.warning("Anchor candidate %s (%d) failed as an anchor function", src_func_ctx.name, src_anchor_index)
continue
anchor_stats.append((src_anchor_index, src_func_ctx, is_str, threshold, anchor_clues))
if is_str:
all_string_clues = all_string_clues.union(anchor_clues)
else:
num_const_clues += len(anchor_clues)
all_const_clues = all_const_clues.union(anchor_clues)
# Traverse all of the strings only once, it is heavy
anchor_bin_strs = defaultdict(list)
# Scanning the entire string list and checking against each anchor string - O(kN) - efficient in memory
if len(all_string_clues) > 0:
for bin_str_ctx in self.disas.strings():
bin_str = str(bin_str_ctx)
if bin_str in all_string_clues:
anchor_bin_strs[bin_str].append(bin_str_ctx)
# full scan (maybe only string scan)
for src_anchor_index, src_func_ctx, is_str, threshold, anchor_clues in anchor_stats:
candidates = None
candidate_sets = []
# scan the full clue list
for clue_idx, clue in enumerate(anchor_clues):
# strings
if is_str:
current_set = set()
# found the string clue in the binary
if clue in anchor_bin_strs:
for bin_str in anchor_bin_strs[clue]:
for ref in self.disas.drefsTo(bin_str.ea):
caller_func = self.disas.funcAt(ref)
if caller_func is None:
continue
callar_func_start = self.disas.funcStart(caller_func)
if lower_border_ea <= callar_func_start and callar_func_start <= upper_border_ea:
current_set.add(callar_func_start)
# consts
else:
# measure some times (for the first one only)
if first_const_anchor:
start_time = time.time()
# scanning the entire firmware per anchor const - O(kN)
current_set = set()
# search for it in the binary (non efficient)
if lower_match_index is None or not efficient_const_search:
search_start = lower_border_ea if not first_const_anchor else 0
search_end = upper_border_ea if not first_const_anchor else (2 ** 64 - 1)
# start our search
for match_ea in self.disas.findImmediate(search_start, search_end, clue):
# Filter out matches that are not inside functions
caller_func = self.disas.funcAt(match_ea)
if caller_func is not None:
current_set.add(self.disas.funcStart(caller_func))
# measure the end time too
if first_const_anchor:
end_time = time.time()
overall_search_time = (end_time - start_time) * num_const_clues
if lower_match_index is None:
efficient_const_search = anchor.MAXIMAL_CONST_SEARCH_TIME <= overall_search_time
else:
efficient_const_search = anchor.MAXIMAL_CONST_SEARCH_RATE <= overall_search_time * 1.0 / (upper_match_index - lower_match_index + 1)
# no longer the first const
first_const_anchor = False
# efficient search
else:
if function_range is None:
self.logger.info("Anchor search - switching to efficient const search mode")
# build the fast mapping, and then continue as before
function_range = []
for function_ea in all_bin_functions[lower_border_index:upper_border_index]:
function_range.append((function_ea, self.disas.locateAnchorConsts(function_ea, all_const_clues)))
# Now actually search for the wanted const value in the result sets
for function_ea, const_set in function_range:
if clue in const_set:
current_set.add(function_ea)
# Same merging logic, for strings and consts
# simply add this option (only if relevant)
if len(current_set) > 0:
candidate_sets.append(current_set)
# check if reached the limit
if len(candidate_sets) >= threshold:
# start checking for a match
candidate_attempt = defaultdict(int)
for candidate_set in candidate_sets:
for candidate in candidate_set:
candidate_attempt[candidate] += 1
candidates = filter(lambda x: candidate_attempt[x] >= threshold, candidate_attempt.keys())
future_candidates = filter(lambda x: candidate_attempt[x] >= threshold - (len(anchor_clues) - (clue_idx + 1)), candidate_attempt.keys())
# stop condition
if len(candidates) == 1 and len(future_candidates) == 0:
break
# check if needs to decide between multiple options
if candidates is not None and len(candidates) > 1:
sorted_candidates = candidate_attempt.keys()
sorted_candidates.sort(key=lambda x: candidate_attempt[x], reverse=True)
# if we have an absolute winner, than pick it (safe to access both cells because len() | |
<reponame>wpj1530882136/RCDemo<gh_stars>0
# -*- coding:utf8 -*-
# ==============================================================================
# Copyright 2017 Baidu.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
This module implements the reading comprehension models based on:
1. the BiDAF algorithm described in https://arxiv.org/abs/1611.01603
2. the Match-LSTM algorithm described in https://openreview.net/pdf?id=B1-q5Pqxl
Note that we use Pointer Network for the decoding stage of both models.
"""
from numpy.random import seed
seed(777)
from tensorflow import set_random_seed
set_random_seed(777)
import os
import time
import logging
import json
import numpy as np
import tensorflow as tf
import tensorflow.contrib as tc
from utils import compute_bleu_rouge
from utils import normalize
from layers.basic_rnn import rnn
from layers.match_layer import MatchLSTMLayer
from layers.match_layer import AttentionFlowMatchLayer
from layers.pointer_net import PointerNetDecoder
import layers.cnn_layer as cnn_layer
# Variance-scaling (Xavier-style, FAN_AVG) initializer factory; wrapped in a
# lambda so each call produces a fresh initializer object.
initializer = lambda: tf.contrib.layers.variance_scaling_initializer(factor=1.0,
                                                                     mode='FAN_AVG',
                                                                     uniform=True,
                                                                     dtype=tf.float32)
# He-style (FAN_IN, factor=2.0) initializer factory, intended for ReLU layers.
initializer_relu = lambda: tf.contrib.layers.variance_scaling_initializer(factor=2.0,
                                                                          mode='FAN_IN',
                                                                          uniform=False,
                                                                          dtype=tf.float32)
# Shared L2 weight regularizer.
regularizer = tf.contrib.layers.l2_regularizer(scale = 3e-7)
class RCModel(object):
"""
Implements the main reading comprehension model.
"""
    def __init__(self, term_vocab, char_vocab, args):
        """Store hyper-parameters, build the TF graph and start a session.

        Args:
            term_vocab: word-level vocabulary; exposes size(), embed_dim and embeddings
            char_vocab: character-level vocabulary with the same interface
            args: parsed command-line arguments carrying the model hyper-parameters
        """
        # logging
        self.logger = logging.getLogger("brc")
        # basic config
        self.algo = args.algo
        self.hidden_size = args.hidden_size
        self.optim_type = args.optim
        self.learning_rate = args.learning_rate
        self.weight_decay = args.weight_decay
        self.use_dropout = args.dropout_keep_prob < 1
        # length limit
        self.max_p_num = args.max_p_num
        self.max_p_len = args.max_p_len
        self.max_q_len = args.max_q_len
        self.max_a_len = args.max_a_len
        self.max_char_num = args.max_char_num
        # the vocab
        self.term_vocab = term_vocab
        self.char_vocab = char_vocab
        self.emb_size = self.term_vocab.embed_dim
        # session info
        sess_config = tf.ConfigProto()
        # grow GPU memory on demand instead of reserving it all up front
        sess_config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=sess_config)
        self._build_graph()
        # save info
        self.saver = tf.train.Saver()
        # initialize the model
        self.sess.run(tf.global_variables_initializer())
    def _build_graph(self):
        """
        Builds the computation graph with Tensorflow.

        The call order matters: _encode() consumes the Bi-LSTM outputs produced
        by _encode_back(), and _decode() consumes self.enc built by _fuse().
        """
        start_t = time.time()
        self._setup_placeholders()
        self._embed()
        self._encode_back()
        self._encode()
        self._match()
        self._fuse()
        self._decode()
        self._compute_loss()
        self._create_train_op()
        self.logger.info('Time to build graph: {} s'.format(time.time() - start_t))
        # Count trainable parameters by evaluating each variable's shape.
        param_num = sum([np.prod(self.sess.run(tf.shape(v))) for v in self.all_params])
        self.logger.info('There are {} parameters in the model'.format(param_num))
    def _setup_placeholders(self):
        """
        Placeholders: passage/question token ids, char ids, true lengths,
        answer-span labels and the dropout keep probability.
        """
        self.p = tf.placeholder(tf.int32, [None, None])  # passage token ids [batch, p_len]
        self.q = tf.placeholder(tf.int32, [None, None])  # question token ids [batch, q_len]
        # NOTE(review): masks treat token id 0 as padding (cast to bool) --
        # confirm the vocabulary reserves id 0 for the pad token.
        self.p_mask = tf.cast(self.p, tf.bool) # [batch, p_len]
        self.q_mask = tf.cast(self.q, tf.bool) # [batch, q_len]
        self.p_char = tf.placeholder(tf.int32, [None, None, self.max_char_num], name='p_char')
        self.q_char = tf.placeholder(tf.int32, [None, None, self.max_char_num], name='q_char')
        self.p_length = tf.placeholder(tf.int32, [None])  # true passage lengths
        self.q_length = tf.placeholder(tf.int32, [None])  # true question lengths
        self.start_label = tf.placeholder(tf.int32, [None])  # gold answer start index
        self.end_label = tf.placeholder(tf.int32, [None])  # gold answer end index
        self.dropout_keep_prob = tf.placeholder(tf.float32)
    def _embed(self):
        """
        The embedding layer, question and passage share embeddings.

        Produces self.p_emb / self.q_emb as the concatenation of word
        embeddings and CNN-pooled character embeddings.
        """
        with tf.variable_scope('word_embedding'):
            # Initialized from pre-trained vectors but still trainable.
            self.word_embeddings = tf.get_variable(
                'word_embeddings',
                shape=(self.term_vocab.size(), self.term_vocab.embed_dim),
                initializer=tf.constant_initializer(self.term_vocab.embeddings),
                trainable=True
            )
            self.p_word_emb = tf.nn.embedding_lookup(self.word_embeddings, self.p)
            self.q_word_emb = tf.nn.embedding_lookup(self.word_embeddings, self.q)
        with tf.variable_scope('char_embedding'):
            self.char_embeddings = tf.get_variable(
                'char_embeddings',
                shape=(self.char_vocab.size(), self.char_vocab.embed_dim),
                initializer=tf.constant_initializer(self.char_vocab.embeddings),
                trainable=True
            )
            self.p_char_emb = tf.nn.embedding_lookup(self.char_embeddings, self.p_char) # [batch, seqlen, max_char_num, embedding_size]
            self.q_char_emb = tf.nn.embedding_lookup(self.char_embeddings, self.q_char)
            # Collapse the per-character dimension with a CNN (see self.cnn_emb).
            self.p_char_emb = self.cnn_emb(self.p_char_emb, "p_emb")
            self.q_char_emb = self.cnn_emb(self.q_char_emb, "q_emb")
            # Alternative char-CNN implementation kept for reference (disabled):
            '''
            self.p_char_emb = tf.reshape(self.p_char_emb, [-1, self.max_char_num, self.emb_size])
            self.q_char_emb = tf.reshape(self.q_char_emb, [-1, self.max_char_num, self.emb_size])
            self.p_char_emb = cnn_layer.conv(self.p_char_emb, self.emb_size,
                bias=True, activation=tf.nn.relu, kernel_size=5, name="char_conv", reuse=None)
            self.q_char_emb = cnn_layer.conv(self.q_char_emb, self.emb_size,
                bias=True, activation=tf.nn.relu, kernel_size=5, name="char_conv", reuse=True)
            self.p_char_emb = tf.reduce_max(self.p_char_emb, axis=1) # [batch*seqlen, 1, emb_size]
            self.q_char_emb = tf.reduce_max(self.q_char_emb, axis=1)
            batch_size = tf.shape(self.p_word_emb)[0]
            self.p_char_emb = tf.reshape(self.p_char_emb, [batch_size, -1, self.emb_size])
            self.q_char_emb = tf.reshape(self.q_char_emb, [batch_size, -1, self.emb_size])
            self.p_char_emb = tf.nn.dropout(self.p_char_emb, 0.95)
            self.q_char_emb = tf.nn.dropout(self.q_char_emb, 0.95)
            '''
            # Word + char features concatenated along the feature axis.
            self.p_emb = tf.concat([self.p_word_emb, self.p_char_emb], -1)
            self.q_emb = tf.concat([self.q_word_emb, self.q_char_emb], -1)
            # self.p_emb = cnn_layer.highway(self.p_emb, size=self.hidden_size, scope="highway", dropout=0.1, reuse=None)
            # self.q_emb = cnn_layer.highway(self.q_emb, size=self.hidden_size, scope="highway", dropout=0.1, reuse=True)
    def _encode_back(self):
        """
        Employs two Bi-LSTMs to encode passage and question separately.

        Sets self.sep_p_encodes / self.sep_q_encodes; these are the inputs
        that _encode() refines further.
        """
        with tf.variable_scope('passage_encoding'):
            self.sep_p_encodes, _ = rnn('bi-lstm', self.p_emb, self.p_length, self.hidden_size)
        with tf.variable_scope('question_encoding'):
            self.sep_q_encodes, _ = rnn('bi-lstm', self.q_emb, self.q_length, self.hidden_size)
        if self.use_dropout:
            self.sep_p_encodes = tf.nn.dropout(self.sep_p_encodes, self.dropout_keep_prob)
            self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, self.dropout_keep_prob)
    def _encode(self):
        """
        Re-encodes passage and question with a convolutional residual block
        (QANet-style) on top of the Bi-LSTM outputs from _encode_back().
        The question encoder reuses the passage encoder's weights
        (reuse=True on the same scope name).
        """
        with tf.variable_scope("Embedding_Encoder_Layer"):
            self.sep_p_encodes = cnn_layer.residual_block(self.sep_p_encodes,
                                num_blocks=1,
                                num_conv_layers=4,
                                kernel_size=5,
                                mask=self.p_mask,
                                num_filters=self.hidden_size,
                                input_projection = True,
                                num_heads=1,
                                seq_len=self.p_length,
                                scope="Encoder_Residual_Block",
                                bias=False,
                                dropout=1-self.dropout_keep_prob)
            self.sep_q_encodes = cnn_layer.residual_block(self.sep_q_encodes,
                                num_blocks=1,
                                num_conv_layers=4,
                                kernel_size=5,
                                mask=self.q_mask,
                                num_filters=self.hidden_size,
                                input_projection=True,
                                num_heads=1,
                                seq_len=self.q_length,
                                scope="Encoder_Residual_Block",
                                reuse=True, # Share the weights between passage and question
                                bias=False,
                                dropout=1-self.dropout_keep_prob)
def _match_back(self):
"""
The core of RC model, get the question-aware passage encoding with either BIDAF or MLSTM
"""
if self.algo == 'MLSTM':
match_layer = MatchLSTMLayer(self.hidden_size)
elif self.algo == 'BIDAF':
match_layer = AttentionFlowMatchLayer(self.hidden_size)
else:
raise NotImplementedError('The algorithm {} is not implemented.'.format(self.algo))
self.match_p_encodes, _ = match_layer.match(self.sep_p_encodes, self.sep_q_encodes,
self.p_length, self.q_length)
if self.use_dropout:
self.match_p_encodes = tf.nn.dropout(self.match_p_encodes, self.dropout_keep_prob)
    def _match(self):
        """
        BiDAF-style context-query attention built from a trilinear similarity
        matrix. Produces self.match_p_encodes, the question-aware passage
        representation [passage; c2q; passage*c2q; passage*q2c].
        """
        with tf.variable_scope("Context_to_Query_Attention_Layer"):
            # S: similarity between every passage and question position.
            S = cnn_layer.optimized_trilinear_for_attention([self.sep_p_encodes, self.sep_q_encodes], input_keep_prob = self.dropout_keep_prob)
            mask_q = tf.expand_dims(self.q_mask, 1) #[batch, 1, q_len]
            # Row-softmax over question positions (padding masked out).
            S_ = tf.nn.softmax(cnn_layer.mask_logits(S, mask = mask_q))
            mask_c = tf.expand_dims(self.p_mask, 2) #[batch, p_len, 1]
            # Column-softmax over passage positions, then transpose.
            S_T = tf.transpose(tf.nn.softmax(cnn_layer.mask_logits(S, mask = mask_c), dim = 1),(0,2,1)) #[batch, q_len, p_len]
            self.c2q = tf.matmul(S_, self.sep_q_encodes) #[batch, p_len, emb_size]
            self.q2c = tf.matmul(tf.matmul(S_, S_T), self.sep_p_encodes) #[batch, p_len, q_len] * [batch, q_len, p_len] * [batch, p_len, emb_size]
            self.match_p_encodes = tf.concat(
                [self.sep_p_encodes,
                 self.c2q,
                 self.sep_p_encodes * self.c2q,
                 self.sep_p_encodes * self.q2c
                ], -1)
    def _fuse_back(self):
        """
        Employs Bi-LSTM again to fuse the context information after match layer.

        The LSTM variant is disabled; a fully-connected projection to
        hidden_size is used instead.
        """
        with tf.variable_scope('fusion'):
            #self.fuse_p_encodes, _ = rnn('lstm', self.match_p_encodes, self.p_length,
            #                             self.hidden_size, layer_num=1)
            self.fuse_p_encodes = tc.layers.fully_connected(self.match_p_encodes, self.hidden_size)
            if self.use_dropout:
                self.fuse_p_encodes = tf.nn.dropout(self.fuse_p_encodes, self.dropout_keep_prob)
    def _fuse(self):
        """
        Model-encoder layer: projects the matched passage encoding, then stacks
        three weight-shared residual blocks. The intermediate outputs
        self.enc[1..3] are consumed by _decode() for span prediction.
        """
        with tf.variable_scope("Model_Encoder_Layer"):
            inputs = self.match_p_encodes
            # enc[0] is the projected input; enc[1..3] are the block outputs.
            self.enc = [cnn_layer.conv(inputs, self.hidden_size, name="input_projection")]
            for i in range(3):
                if i % 2 == 0: # dropout every 2 blocks
                    self.enc[i] = tf.nn.dropout(self.enc[i], self.dropout_keep_prob)
                self.enc.append(
                    cnn_layer.residual_block(self.enc[i],
                                num_blocks=1,
                                num_conv_layers=2,
                                kernel_size=5,
                                mask=self.p_mask,
                                num_filters=self.hidden_size,
                                num_heads=1,
                                seq_len=self.p_length,
                                scope="Model_Encoder",
                                bias=False,
                                reuse=True if i > 0 else None,  # share weights across the 3 blocks
                                dropout=1-self.dropout_keep_prob)
                )
    def _decode_back(self):
        """
        Employs Pointer Network to get the the probs of each position
        to be the start or end of the predicted answer.
        Note that we concat the fuse_p_encodes for the passages in the same document.
        And since the encodes of queries in the same document is same, we select the first one.
        """
        with tf.variable_scope('same_question_concat'):
            batch_size = tf.shape(self.start_label)[0]
            # Merge the per-passage encodings of one document into one sequence.
            concat_passage_encodes = tf.reshape(
                self.fuse_p_encodes,
                [batch_size, -1, self.hidden_size]
            )
            # All passages of a document share the same question; keep the first.
            no_dup_question_encodes = tf.reshape(
                self.sep_q_encodes,
                [batch_size, -1, tf.shape(self.sep_q_encodes)[1], self.hidden_size]
            )[0:, 0, 0:, 0:]
        decoder = PointerNetDecoder(self.hidden_size)
        self.start_probs, self.end_probs = decoder.decode(concat_passage_encodes,
                                                          no_dup_question_encodes)
        # Joint start/end distribution; band_part keeps only start <= end spans.
        outer = tf.matmul(tf.expand_dims(tf.nn.softmax(self.start_probs), axis=2),
                          tf.expand_dims(tf.nn.softmax(self.end_probs), axis=1))
        outer = tf.matrix_band_part(outer, 0, -1)
        # Most likely span boundaries.
        self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1)
        self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1)
'''
def get_prio_probs(self):
with tf.variable_scope("prio_probs"):
sim = tf.matmul(self.p_emb, self.q_emb, transpose_b=True)
sim = tf.nn.softmax(sim, -1)#[batch, p_len, q_len]
output = tf.reduce_max(self.sim, -1) #[batch, p_len]
output = tf.expand_dims(tf.sigmoid(output), -1) #[batch, p_len, 1]
w = tf.constant([0, 0, 1, 0.8, 0.6], shape=[5, 1, 1], dtype=tf.float32)
probs = tf.nn.conv1d(output, w, stride=1, padding="SAME") #[batch, p_len, 1]
probs = tf.nn.softmax(tf.squeeze(probs, 2), -1)
return probs
'''
    def _decode(self):
        """
        QANet-style output layer: start/end logits come from 1x1 convolutions
        over pairs of model-encoder outputs (enc[1]+enc[2] for start,
        enc[1]+enc[3] for end). Also computes a word-overlap prior
        (self.prio_probs) and the most likely answer span (self.yp1/self.yp2).
        """
        with tf.variable_scope("Output_Layer"):
            batch_size = tf.shape(self.start_label)[0]
            # Flatten per-passage encodings into one per-document sequence.
            self.enc[1] = tf.reshape(self.enc[1], [batch_size, -1, self.hidden_size])
            self.enc[2] = tf.reshape(self.enc[2], [batch_size, -1, self.hidden_size])
            self.enc[3] = tf.reshape(self.enc[3], [batch_size, -1, self.hidden_size])
            p_mask = tf.reshape(self.p_mask, [batch_size, -1])
            start_logits = tf.squeeze(
                cnn_layer.conv(tf.concat([self.enc[1], self.enc[2]], axis=-1), 1, bias=False, name="start_pointer"),
                -1)  # [batch, p_len]
            end_logits = tf.squeeze(
                cnn_layer.conv(tf.concat([self.enc[1], self.enc[3]], axis=-1), 1, bias=False, name="end_pointer"),
                -1)  # [batch, p_len]
            self.logits = [cnn_layer.mask_logits(start_logits, mask=p_mask),
                           cnn_layer.mask_logits(end_logits, mask=p_mask)]
            self.start_probs, self.end_probs = [l for l in self.logits]
            #temp_p_emb = tf.nn.embedding_lookup(self.word_embeddings, self.p)
            #temp_q_emb = tf.nn.embedding_lookup(self.word_embeddings, self.q)
            temp_p_emb = self.p_word_emb
            temp_q_emb = self.q_word_emb
            # Cosine similarity between passage and question word embeddings,
            # smoothed by a fixed 1-D convolution, gives a positional prior.
            self.norm_p_emb = temp_p_emb / tf.sqrt(tf.reduce_sum(temp_p_emb*temp_p_emb, -1, keep_dims=True))
            self.norm_q_emb = temp_q_emb / tf.sqrt(tf.reduce_sum(temp_q_emb*temp_q_emb, -1, keep_dims=True))
            self.sim = tf.matmul(self.norm_p_emb, self.norm_q_emb, transpose_b=True)
            output = tf.reduce_max(self.sim, -1) # [batch, p_len]
            output = tf.expand_dims(tf.sigmoid(output), -1) # [batch, p_len, 1]
            w = tf.constant([0, 0, 1, 0.8, 0.6], shape=[5, 1, 1], dtype=tf.float32)
            probs = tf.nn.conv1d(output, w, stride=1, padding="SAME") # [batch, p_len, 1]
            self.prio_probs = tf.nn.softmax(tf.squeeze(probs, 2), -1)
            self.start_probs = tf.nn.softmax(self.start_probs, -1)
            self.end_probs = tf.nn.softmax(self.end_probs, -1)
            # NOTE(review): start_probs/end_probs are already softmaxed just
            # above, and softmax is applied AGAIN when building `outer` --
            # a double softmax flattens the distribution; confirm this is
            # intentional (it mirrors _decode_back, which also re-softmaxes).
            outer = tf.matmul(tf.expand_dims(tf.nn.softmax(self.start_probs), axis=2),
                              tf.expand_dims(tf.nn.softmax(self.end_probs), axis=1))
            # Keep only spans with start <= end.
            outer = tf.matrix_band_part(outer, 0, -1)
            self.yp1 = tf.argmax(tf.reduce_max(outer, axis=2), axis=1)
            self.yp2 = tf.argmax(tf.reduce_max(outer, axis=1), axis=1)
    def _compute_loss(self):
        """
        The loss function: mean of start + end negative log-likelihood,
        plus optional L2 weight decay over all trainable variables.
        """
        def sparse_nll_loss(probs, labels, epsilon=1e-9, scope=None):
            """
            negative log likelyhood loss

            `probs` must already be a probability distribution over positions
            (softmaxed in _decode); epsilon guards log(0).
            """
            with tf.name_scope(scope, "log_loss"):
                labels = tf.one_hot(labels, tf.shape(probs)[1], axis=1)
                losses = - tf.reduce_sum(labels * tf.log(probs + epsilon), 1)
            return losses
        self.start_loss = sparse_nll_loss(probs=self.start_probs, labels=self.start_label)
        self.end_loss = sparse_nll_loss(probs=self.end_probs, labels=self.end_label)
        self.all_params = tf.trainable_variables()
        self.loss = tf.reduce_mean(tf.add(self.start_loss, self.end_loss))
        if self.weight_decay > 0:
            with tf.variable_scope('l2_loss'):
                l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in self.all_params])
            self.loss += self.weight_decay * l2_loss
def _create_train_op(self):
"""
Selects the training algorithm and creates a train operation with it
"""
if self.optim_type == 'adagrad':
self.optimizer = tf.train.AdagradOptimizer(self.learning_rate)
elif self.optim_type == 'adam':
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
elif self.optim_type | |
#!/usr/bin/env python
"""Make ATLAS Stamps in the context of the transient server database.
Usage:
%s <configfile> [<candidate>...] [--detectionlist=<detectionlist>] [--customlist=<customlist>] [--limit=<limit>] [--earliest] [--nondetections] [--discoverylimit=<discoverylimit>] [--lastdetectionlimit=<lastdetectionlimit>] [--requesttype=<requesttype>] [--wpwarp=<wpwarp>] [--update] [--ddc] [--skipdownload] [--redregex=<redregex>] [--diffregex=<diffregex>] [--redlocation=<redlocation>] [--difflocation=<difflocation>]
%s (-h | --help)
%s --version
Options:
-h --help Show this screen.
--version Show version.
--update Update the database
--detectionlist=<detectionlist> List option
--customlist=<customlist> Custom List option
--limit=<limit> Number of detections for which to request images [default: 6]
--earliest By default, get the most recent stamps. Otherwise get the earliest ones.
--nondetections Request non-detections.
--discoverylimit=<discoverylimit> Number of days before which we will not request images (ignored if non-detections not requested) [default: 10]
--lastdetectionlimit=<lastdetectionlimit> Number of days after the last detection we will request images (ignored if non-detections not requested) [default: 20]
--requesttype=<requesttype> Request type (all | incremental) [default: incremental]
--ddc Use the DDC schema for queries
--skipdownload Do not attempt to download the exposures (assumes they already exist locally)
--wpwarp=<wpwarp> Which version of wpwarp to use? (1 or 2) [default: 2]
--redregex=<redregex> Reduced image regular expression. Caps = variable. [default: EXPNAME.fits.fz]
--diffregex=<diffregex> Diff image regular expression. Caps = variable. [default: EXPNAME.diff.fz]
--redlocation=<redlocation> Reduced image location. E.g. /atlas/diff/CAMERA/fake/MJD.fake (caps = special variable). Null value means use standard ATLAS archive location.
--difflocation=<difflocation> Diff image location. E.g. /atlas/diff/CAMERA/fake/MJD.fake (caps = special variable). Null value means use standard ATLAS archive location.
E.g.:
%s ~/config_fakers.yaml 1130252001002421600 --ddc --skipdownload --redlocation=/atlas/diff/CAMERA/fake/MJD.fake --redregex=EXPNAME.fits+fake --difflocation=/atlas/diff/CAMERA/fake/MJD.fake --diffregex=EXPNAME.diff+fake
"""
import sys
__doc__ = __doc__ % (sys.argv[0], sys.argv[0], sys.argv[0], sys.argv[0])
from docopt import docopt
import os, shutil, re, csv, subprocess
# In this module, we want to split out the rsync stuff from the stamp production.
# The reason for this is that we want to be able to call this code from a multiprocessing
# module, which will multiprocess the rsyncs differently from the stamp production.
# We'll need to rewrite the code below so that we can do the split.
# This time, we'll do the rsync calculation at the beginning of the code for BOTH
# diff and target images.
# So the sequence will be:
# 1. Calculate unique exposures.
# 2. Multiprocessing: Kick off up to 10 rsync threads to GET the exposures. To do this
# we need to split out the rsync code into a worker method.
# 3. When rsync complete, split the objects into nCPU threads and generate stamps
# Notes: 1. The code should be flexible enough to use EITHER getfits or monsta.
# 2. The code should be flexible enough to use EITHER internal generation of jpegs OR monsta generated jpegs.
# If generating jpegs with monsta, need to specify what max and min are. (Maybe hard wired, as in some of the monsta
# code.
# 2015-12-02 KWS Added new version of this code.
import sys, os, errno
import datetime
import subprocess
from gkutils.commonutils import dbConnect, PROCESSING_FLAGS, calculateRMSScatter, Struct, cleanOptions
import MySQLdb
from pstamp_utils import getLightcurveDetectionsAtlas2, getExistingDetectionImages, getExistingNonDetectionImages, DETECTIONTYPES, REQUESTTYPES, PSTAMP_SUCCESS, PSTAMP_NO_OVERLAP, PSTAMP_SYSTEM_ERROR, IPP_IDET_NON_DETECTION_VALUE, insertPostageStampImageRecordAtlas
import image_utils as imu
#import pyfits as pf
from astropy.io import fits as pf
from psat_server_web.atlas.atlas.commonqueries import getLightcurvePoints, getNonDetections, getNonDetectionsUsingATLASFootprint, ATLAS_METADATADDC, filterWhereClauseddc, LC_POINTS_QUERY_ATLAS_DDC, FILTERS
from random import shuffle
def updateAtlasObjectProcessingFlag(conn, candidate, processingFlag = PROCESSING_FLAGS['stamps']):
    """Update the processing flag for the relevant database object to prevent repeat processing of the same objects.

    Args:
        conn: open MySQL database connection
        candidate: dict-like row containing the object 'id'
        processingFlag: bit flag to OR into atlas_diff_objects.processing_flags

    Returns:
        number of rows updated (0 on database error or if the id was not found)
    """
    updatedRows = 0
    cursor = None
    try:
        cursor = conn.cursor (MySQLdb.cursors.DictCursor)
        cursor.execute ("""
            update atlas_diff_objects
            set processing_flags = if(processing_flags is null, %s, processing_flags | %s)
            where id = %s
            """, (processingFlag, processingFlag, candidate['id']))
        updatedRows = cursor.rowcount
    except MySQLdb.Error as e:
        # Best effort: report the error and fall through with updatedRows = 0.
        sys.stderr.write("Error %d: %s\n" % (e.args[0], e.args[1]))
    finally:
        # Bug fix: the original only closed the cursor on success, leaking it
        # whenever execute() raised. Always release it here. (The redundant
        # function-local `import MySQLdb` was dropped; the module imports it.)
        if cursor is not None:
            cursor.close()
    return updatedRows
def eliminateExistingImages(conn, candidate, detections, detectionsWithImages):
    """Filter out detections whose stamp images already exist.

    Args:
        conn: database connection (unused; kept for interface compatibility)
        candidate: integer object id used to build the image key
        detections: detection rows exposing tdate, expname and tphot_id
        detectionsWithImages: collection of keys for already-generated images

    Returns:
        list of detection rows that still need an image request
    """
    return [det for det in detections
            if '%d_%s_%s_%d' % (candidate, det.tdate, det.expname, det.tphot_id) not in detectionsWithImages]
# imageType = 'diff' or 'red'
#def doRsync(exposureSet, imageType, userId = 'xfer', remoteMachine = 'atlas-base-adm02.ifa.hawaii.edu', remoteLocation = '/atlas', localLocation = '/atlas', getMetadata = False, metadataExtension = '.tph'):
def doRsync(exposureSet, imageType, userId = 'xfer', remoteMachine = 'atlas-base-adm02.ifa.hawaii.edu', remoteLocation = '/atlas', localLocation = '/atlas', getMetadata = False, metadataExtension = '.tph'):
    """Rsync ATLAS exposure images (or their metadata files) from the remote archive.

    Args:
        exposureSet: list of exposure names, e.g. '02a57771o...' (sorted in place)
        imageType: 'diff' or 'red' -- selects the archive subdirectory and extension
        userId: remote ssh user
        remoteMachine: remote archive host
        remoteLocation: remote archive root directory
        localLocation: local archive root directory
        getMetadata: if True fetch only the metadata file instead of the image
        metadataExtension: metadata file extension (e.g. '.tph')

    Returns:
        0 on completion, 1 if imageType is invalid (rsync errors are only printed)
    """
    exposureSet.sort()
    rsyncCmd = '/usr/bin/rsync'
    if imageType not in ['diff','red']:
        print("Image type must be diff or red")
        return 1
    imageExtension = {'diff':'.diff.fz','red':'.fits.fz'}
    # Per-process file name so concurrent workers don't clobber each other's lists.
    rsyncFile = '/tmp/rsyncFiles_' + imageType + str(os.getpid()) + '.txt'
    # Create a diff and input rsync file.
    # Bug fix: use a context manager so the handle is closed even if a write
    # raises (the original open()/close() pair leaked it on error).
    with open(rsyncFile, 'w') as rsf:
        for exp in exposureSet:
            # Exposure names encode the camera (chars 0-2) and MJD (chars 3-7).
            camera = exp[0:3]
            mjd = exp[3:8]
            imageName = camera + '/' + mjd + '/' + exp + imageExtension[imageType]
            if getMetadata:
                # We don't need the image, just get the metadata
                imageName = camera + '/' + mjd + '/' + exp + metadataExtension
                # .tph files live in an AUX subdirectory from MJD 57350 onwards.
                if metadataExtension == '.tph' and int(mjd) >= 57350:
                    imageName = camera + '/' + mjd + '/' + 'AUX/' + exp + metadataExtension
            rsf.write('%s\n' % imageName)
    remote = userId + '@' + remoteMachine + ':' + remoteLocation + '/' + imageType
    local = localLocation + '/' + imageType
    # Get the diff images
    # 2018-04-16 KWS Removed the 'u' flag. We don't need to update the images.
    p = subprocess.Popen([rsyncCmd, '-avxKL', '--files-from=%s' % rsyncFile, remote, local], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, errors = p.communicate()
    if output.strip():
        print(output)
    if errors.strip():
        print(errors)
    return 0
# 2016-10-10 KWS I seem to be calling the same code exactly more than once, so lets just use a single function
def getLightcurveData(conn, candidate, limit = 0, mostRecent = True, nonDets = False, discoveryLimit = 30, lastDetectionLimit=20, requestType = REQUESTTYPES['incremental'], ddc = False):
    """Gather the lightcurve recurrences (and optionally non-detections) for a candidate.

    Args:
        conn: database connection.
        candidate: candidate record; only candidate['id'] is read here.
        limit: if non-zero, keep at most this many recurrences (applied after ordering).
        mostRecent: if True return newest-first and truncate to recent, non-duplicate epochs.
        nonDets: if True also append non-detections ("blanks") that fall inside
                 [firstDetection - discoveryLimit, lastDetection + lastDetectionLimit].
        discoveryLimit: days before the first detection to include non-detections.
        lastDetectionLimit: days after the last detection to include non-detections.
        requestType: when REQUESTTYPES['incremental'], drop rows whose image already exists.
        ddc: if True use the DDC detection query / metadata tables.

    Returns:
        tuple: (recurrences, avgRa, avgDec) where avgRa/avgDec are the mean
        coordinates computed over ALL recurrences before any truncation.
    """
    #lightcurveData = getLightcurveDetectionsAtlas2(conn, candidate['id'], limit = limit, mostRecent = mostRecent)
    if ddc:
        p, recurrences = getLightcurvePoints(candidate['id'], lcQuery=LC_POINTS_QUERY_ATLAS_DDC + filterWhereClauseddc(FILTERS), conn = conn)
    else:
        p, recurrences = getLightcurvePoints(candidate['id'], conn = conn)
    existingImages = getExistingDetectionImages(conn, candidate['id'])
    # Recurrences presumably arrive oldest-first (reverse below makes them
    # newest-first); remember the endpoints before any reordering.
    firstDetection = recurrences[0]
    lastDetection = recurrences[-1]
    if mostRecent:
        # reverse the order of the list (newest first)
        recurrences.reverse()
    # Get the mean RA and Dec over all recurrences (before truncation).
    objectCoords = []
    for row in recurrences:
        objectCoords.append({'RA': row.ra, 'DEC': row.dec})
    avgRa, avgDec, rms = calculateRMSScatter(objectCoords)
    if limit:
        if len(recurrences) > limit:
            recurrences = recurrences[0:limit]
    # 2018-11-03 KWS Don't bother requesting images more than 10 days
    #                older than the most recent recurrence. This will stop
    #                the rsyncing of old data.
    # 2018-11-05 KWS Don't bother requesting images where row.dup < 0.
    # NOTE(review): the 2018-11-03 comment says 10 days, but the code below
    #               uses a 50-day window (recentMJD - 50) -- confirm intent.
    if mostRecent:
        recentMJD = recurrences[0].mjd
        truncatedRecurrences = []
        for row in recurrences:
            if row.dup >= 0 and row.mjd > recentMJD - 50:
                truncatedRecurrences.append(row)
        # Only adopt the truncated list when it is non-empty, so we never
        # end up with zero recurrences.
        if len(truncatedRecurrences) > 0:
            recurrences = truncatedRecurrences
    # But only go back as far as firstDetection - discoveryLimit
    if nonDets: # and not limit:
        #b, blanks, lastNonDetection = getNonDetections(recurrences, conn = conn, searchRadius=500, tolerance = 0.001)
        if ddc:
            b, blanks, lastNonDetection = getNonDetectionsUsingATLASFootprint(recurrences, conn = conn, ndQuery=ATLAS_METADATADDC, filterWhereClause = filterWhereClauseddc, catalogueName = 'atlas_metadataddc')
        else:
            b, blanks, lastNonDetection = getNonDetectionsUsingATLASFootprint(recurrences, conn = conn)
        existingImages += getExistingNonDetectionImages(conn, candidate['id'])
        for row in blanks:
            if row.mjd >= firstDetection.mjd - discoveryLimit and row.mjd <= lastDetection.mjd + lastDetectionLimit:
                # Tag blanks with the sentinel id so downstream code can tell
                # them apart from real detections.
                row.tphot_id = IPP_IDET_NON_DETECTION_VALUE
                recurrences.append(row)
    if requestType == REQUESTTYPES['incremental']:
        # Incremental requests: skip rows whose image is already on disk.
        recurrences = eliminateExistingImages(conn, candidate['id'], recurrences, existingImages)
    return recurrences, avgRa, avgDec
# Function to find the unique exposures, given a candidate list.
# 2016-10-06 KWS Get the non-detections as well
def getUniqueExposures(conn, candidateList, limit = 0, mostRecent = True, nonDets = False, discoveryLimit = 10, lastDetectionLimit=20, requestType = REQUESTTYPES['incremental'], ddc = False):
"""getUniqueExposures.
Args:
conn:
candidateList:
limit:
mostRecent:
nonDets:
discoveryLimit:
lastDetectionLimit:
requestType:
ddc:
"""
print("Finding Unique Exposures...")
exposures = []
# Always get all of the detection exposures
for candidate in candidateList:
recurrences, avgRa, avgDec = getLightcurveData(conn, candidate, limit = limit, mostRecent = mostRecent, nonDets = nonDets, discoveryLimit = discoveryLimit, lastDetectionLimit=lastDetectionLimit, requestType = requestType, ddc = ddc)
for row in recurrences:
exposures.append(row.expname)
exposureSet = list(set(exposures))
# 2016-10-07 KWS The problem is that the most recent exposures are probably
# | |
<reponame>alexbowers/pulumi-aws
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['TopicSubscriptionArgs', 'TopicSubscription']
@pulumi.input_type
class TopicSubscriptionArgs:
    # NOTE(review): this class is emitted by the Pulumi Terraform Bridge
    # (tfgen) -- see the file header. Keep hand edits to comments/docstrings
    # only, so a future regeneration does not conflict.
    def __init__(__self__, *,
                 endpoint: pulumi.Input[str],
                 protocol: pulumi.Input[str],
                 topic: pulumi.Input[str],
                 confirmation_timeout_in_minutes: Optional[pulumi.Input[int]] = None,
                 delivery_policy: Optional[pulumi.Input[str]] = None,
                 endpoint_auto_confirms: Optional[pulumi.Input[bool]] = None,
                 filter_policy: Optional[pulumi.Input[str]] = None,
                 raw_message_delivery: Optional[pulumi.Input[bool]] = None,
                 redrive_policy: Optional[pulumi.Input[str]] = None,
                 subscription_role_arn: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a TopicSubscription resource.
        :param pulumi.Input[str] endpoint: Endpoint to send data to. The contents vary with the protocol. See details below.
        :param pulumi.Input[str] protocol: Protocol to use. Valid values are: `sqs`, `sms`, `lambda`, `firehose`, and `application`. Protocols `email`, `email-json`, `http` and `https` are also valid but partially supported. See details below.
        :param pulumi.Input[str] topic: ARN of the SNS topic to subscribe to.
        :param pulumi.Input[int] confirmation_timeout_in_minutes: Integer indicating number of minutes to wait in retrying mode for fetching subscription arn before marking it as failure. Only applicable for http and https protocols. Default is `1`.
        :param pulumi.Input[str] delivery_policy: JSON String with the delivery policy (retries, backoff, etc.) that will be used in the subscription - this only applies to HTTP/S subscriptions. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/DeliveryPolicies.html) for more details.
        :param pulumi.Input[bool] endpoint_auto_confirms: Whether the endpoint is capable of [auto confirming subscription](http://docs.aws.amazon.com/sns/latest/dg/SendMessageToHttp.html#SendMessageToHttp.prepare) (e.g., PagerDuty). Default is `false`.
        :param pulumi.Input[str] filter_policy: JSON String with the filter policy that will be used in the subscription to filter messages seen by the target resource. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/message-filtering.html) for more details.
        :param pulumi.Input[bool] raw_message_delivery: Whether to enable raw message delivery (the original message is directly passed, not wrapped in JSON with the original message in the message property). Default is `false`.
        :param pulumi.Input[str] redrive_policy: JSON String with the redrive policy that will be used in the subscription. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/sns-dead-letter-queues.html#how-messages-moved-into-dead-letter-queue) for more details.
        :param pulumi.Input[str] subscription_role_arn: ARN of the IAM role to publish to Kinesis Data Firehose delivery stream. Refer to [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html).
        """
        # Required arguments: always recorded on the resource.
        pulumi.set(__self__, "endpoint", endpoint)
        pulumi.set(__self__, "protocol", protocol)
        pulumi.set(__self__, "topic", topic)
        # Optional arguments: only recorded when supplied, so the provider
        # can fall back to its own defaults for the rest.
        if confirmation_timeout_in_minutes is not None:
            pulumi.set(__self__, "confirmation_timeout_in_minutes", confirmation_timeout_in_minutes)
        if delivery_policy is not None:
            pulumi.set(__self__, "delivery_policy", delivery_policy)
        if endpoint_auto_confirms is not None:
            pulumi.set(__self__, "endpoint_auto_confirms", endpoint_auto_confirms)
        if filter_policy is not None:
            pulumi.set(__self__, "filter_policy", filter_policy)
        if raw_message_delivery is not None:
            pulumi.set(__self__, "raw_message_delivery", raw_message_delivery)
        if redrive_policy is not None:
            pulumi.set(__self__, "redrive_policy", redrive_policy)
        if subscription_role_arn is not None:
            pulumi.set(__self__, "subscription_role_arn", subscription_role_arn)
    @property
    @pulumi.getter
    def endpoint(self) -> pulumi.Input[str]:
        """
        Endpoint to send data to. The contents vary with the protocol. See details below.
        """
        return pulumi.get(self, "endpoint")
    @endpoint.setter
    def endpoint(self, value: pulumi.Input[str]):
        pulumi.set(self, "endpoint", value)
    @property
    @pulumi.getter
    def protocol(self) -> pulumi.Input[str]:
        """
        Protocol to use. Valid values are: `sqs`, `sms`, `lambda`, `firehose`, and `application`. Protocols `email`, `email-json`, `http` and `https` are also valid but partially supported. See details below.
        """
        return pulumi.get(self, "protocol")
    @protocol.setter
    def protocol(self, value: pulumi.Input[str]):
        pulumi.set(self, "protocol", value)
    @property
    @pulumi.getter
    def topic(self) -> pulumi.Input[str]:
        """
        ARN of the SNS topic to subscribe to.
        """
        return pulumi.get(self, "topic")
    @topic.setter
    def topic(self, value: pulumi.Input[str]):
        pulumi.set(self, "topic", value)
    @property
    @pulumi.getter(name="confirmationTimeoutInMinutes")
    def confirmation_timeout_in_minutes(self) -> Optional[pulumi.Input[int]]:
        """
        Integer indicating number of minutes to wait in retrying mode for fetching subscription arn before marking it as failure. Only applicable for http and https protocols. Default is `1`.
        """
        return pulumi.get(self, "confirmation_timeout_in_minutes")
    @confirmation_timeout_in_minutes.setter
    def confirmation_timeout_in_minutes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "confirmation_timeout_in_minutes", value)
    @property
    @pulumi.getter(name="deliveryPolicy")
    def delivery_policy(self) -> Optional[pulumi.Input[str]]:
        """
        JSON String with the delivery policy (retries, backoff, etc.) that will be used in the subscription - this only applies to HTTP/S subscriptions. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/DeliveryPolicies.html) for more details.
        """
        return pulumi.get(self, "delivery_policy")
    @delivery_policy.setter
    def delivery_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "delivery_policy", value)
    @property
    @pulumi.getter(name="endpointAutoConfirms")
    def endpoint_auto_confirms(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the endpoint is capable of [auto confirming subscription](http://docs.aws.amazon.com/sns/latest/dg/SendMessageToHttp.html#SendMessageToHttp.prepare) (e.g., PagerDuty). Default is `false`.
        """
        return pulumi.get(self, "endpoint_auto_confirms")
    @endpoint_auto_confirms.setter
    def endpoint_auto_confirms(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "endpoint_auto_confirms", value)
    @property
    @pulumi.getter(name="filterPolicy")
    def filter_policy(self) -> Optional[pulumi.Input[str]]:
        """
        JSON String with the filter policy that will be used in the subscription to filter messages seen by the target resource. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/message-filtering.html) for more details.
        """
        return pulumi.get(self, "filter_policy")
    @filter_policy.setter
    def filter_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "filter_policy", value)
    @property
    @pulumi.getter(name="rawMessageDelivery")
    def raw_message_delivery(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to enable raw message delivery (the original message is directly passed, not wrapped in JSON with the original message in the message property). Default is `false`.
        """
        return pulumi.get(self, "raw_message_delivery")
    @raw_message_delivery.setter
    def raw_message_delivery(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "raw_message_delivery", value)
    @property
    @pulumi.getter(name="redrivePolicy")
    def redrive_policy(self) -> Optional[pulumi.Input[str]]:
        """
        JSON String with the redrive policy that will be used in the subscription. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/sns-dead-letter-queues.html#how-messages-moved-into-dead-letter-queue) for more details.
        """
        return pulumi.get(self, "redrive_policy")
    @redrive_policy.setter
    def redrive_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "redrive_policy", value)
    @property
    @pulumi.getter(name="subscriptionRoleArn")
    def subscription_role_arn(self) -> Optional[pulumi.Input[str]]:
        """
        ARN of the IAM role to publish to Kinesis Data Firehose delivery stream. Refer to [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html).
        """
        return pulumi.get(self, "subscription_role_arn")
    @subscription_role_arn.setter
    def subscription_role_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subscription_role_arn", value)
@pulumi.input_type
class _TopicSubscriptionState:
def __init__(__self__, *,
arn: Optional[pulumi.Input[str]] = None,
confirmation_timeout_in_minutes: Optional[pulumi.Input[int]] = None,
confirmation_was_authenticated: Optional[pulumi.Input[bool]] = None,
delivery_policy: Optional[pulumi.Input[str]] = None,
endpoint: Optional[pulumi.Input[str]] = None,
endpoint_auto_confirms: Optional[pulumi.Input[bool]] = None,
filter_policy: Optional[pulumi.Input[str]] = None,
owner_id: Optional[pulumi.Input[str]] = None,
pending_confirmation: Optional[pulumi.Input[bool]] = None,
protocol: Optional[pulumi.Input[str]] = None,
raw_message_delivery: Optional[pulumi.Input[bool]] = None,
redrive_policy: Optional[pulumi.Input[str]] = None,
subscription_role_arn: Optional[pulumi.Input[str]] = None,
topic: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering TopicSubscription resources.
:param pulumi.Input[str] arn: ARN of the subscription.
:param pulumi.Input[int] confirmation_timeout_in_minutes: Integer indicating number of minutes to wait in retrying mode for fetching subscription arn before marking it as failure. Only applicable for http and https protocols. Default is `1`.
:param pulumi.Input[bool] confirmation_was_authenticated: Whether the subscription confirmation request was authenticated.
:param pulumi.Input[str] delivery_policy: JSON String with the delivery policy (retries, backoff, etc.) that will be used in the subscription - this only applies to HTTP/S subscriptions. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/DeliveryPolicies.html) for more details.
:param pulumi.Input[str] endpoint: Endpoint to send data to. The contents vary with the protocol. See details below.
:param pulumi.Input[bool] endpoint_auto_confirms: Whether the endpoint is capable of [auto confirming subscription](http://docs.aws.amazon.com/sns/latest/dg/SendMessageToHttp.html#SendMessageToHttp.prepare) (e.g., PagerDuty). Default is `false`.
:param pulumi.Input[str] filter_policy: JSON String with the filter policy that will be used in the subscription to filter messages seen by the target resource. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/message-filtering.html) for more details.
:param pulumi.Input[str] owner_id: AWS account ID of the subscription's owner.
:param pulumi.Input[bool] pending_confirmation: Whether the subscription has not been confirmed.
:param pulumi.Input[str] protocol: Protocol to use. Valid values are: `sqs`, `sms`, `lambda`, `firehose`, and `application`. Protocols `email`, `email-json`, `http` and `https` are also valid but partially supported. See details below.
:param pulumi.Input[bool] raw_message_delivery: Whether to enable raw message delivery (the original message is directly passed, not wrapped in JSON with the original message in the message property). Default is `false`.
:param pulumi.Input[str] redrive_policy: JSON String with the redrive policy that will be used in the subscription. Refer to the [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/sns-dead-letter-queues.html#how-messages-moved-into-dead-letter-queue) for more details.
:param pulumi.Input[str] subscription_role_arn: ARN of the IAM role to publish to Kinesis Data Firehose delivery stream. Refer to [SNS docs](https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html).
:param pulumi.Input[str] topic: ARN of the SNS topic to subscribe to.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if confirmation_timeout_in_minutes is not None:
pulumi.set(__self__, "confirmation_timeout_in_minutes", confirmation_timeout_in_minutes)
if confirmation_was_authenticated is not None:
pulumi.set(__self__, "confirmation_was_authenticated", confirmation_was_authenticated)
if delivery_policy is not None:
pulumi.set(__self__, "delivery_policy", delivery_policy)
if endpoint is not None:
pulumi.set(__self__, "endpoint", endpoint)
if endpoint_auto_confirms is not None:
pulumi.set(__self__, "endpoint_auto_confirms", endpoint_auto_confirms)
if filter_policy is not None:
pulumi.set(__self__, "filter_policy", filter_policy)
if owner_id is not None:
pulumi.set(__self__, "owner_id", owner_id)
if pending_confirmation is not None:
pulumi.set(__self__, "pending_confirmation", pending_confirmation)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if raw_message_delivery is not None:
pulumi.set(__self__, "raw_message_delivery", raw_message_delivery)
if redrive_policy is not None:
pulumi.set(__self__, "redrive_policy", redrive_policy)
if subscription_role_arn is not None:
pulumi.set(__self__, "subscription_role_arn", subscription_role_arn)
if topic is | |
import logging
import json
import glob
import pandas as pd
import multiprocessing
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.model_selection import cross_val_predict
from sklearn.decomposition import IncrementalPCA
from scipy.stats import spearmanr
import config
from util import *
np.random.seed(config.RANDOM_SEED)
repo_lang = Repository_language()
def store_classification_result(model_name, language, model_classification_report, classification_results):
    """
    Stores the result of the classifier in the prediction-result directory.

    :param model_name: the classifier's class name (used in the file names)
    :param language: programming language (used in the file names)
    :param model_classification_report: sklearn text classification report
    :param classification_results: dict of per-model results (JSON-serialised)
    """
    raw_path = '{}classification_result_raw_{}_{}.txt'.format(config.PREDICTION_RESULT_PATH, model_name, language)
    json_path = '{}classification_result_json_{}_{}.json'.format(config.PREDICTION_RESULT_PATH, model_name, language)
    # Context managers guarantee the handles are closed and flushed; the
    # original open(...).write(...) chains leaked the file descriptors.
    with open(raw_path, 'w') as raw_file:
        raw_file.write(model_classification_report)
    with open(json_path, 'w') as json_file:
        json.dump(classification_results, json_file)
def data_classification_wo_cv(language, repo, data_train, label_train, data_test, label_test, random_seed=config.RANDOM_SEED, job_num=multiprocessing.cpu_count()):
    """
    Trains the grid-searched classifiers on a fixed train split and evaluates
    each best estimator on the held-out test split (no outer cross-validation).

    :param language: programming language of the repository
    :param repo: repository name, recorded in each result row
    :param data_train: training feature matrix
    :param label_train: training labels
    :param data_test: test feature matrix
    :param label_test: test labels
    :param random_seed: the random seed
    :param job_num: the number of cores to use
    :return: list of metric dicts, one per classifier family
    """
    # Inner CV used by every grid search for hyper-parameter selection.
    # (The original also created an unused outer_cv splitter.)
    inner_cv = KFold(n_splits=config.FOLD_NUM, shuffle=True, random_state=random_seed)
    # Hyper-parameter grids
    tree_param = {'min_samples_leaf': config.MIN_SAMPLE_LEAVES, 'min_samples_split': config.MIN_SAMPLE_SPLIT,
                  'max_depth': config.TREE_MAX_DEPTH}
    forest_param = {'n_estimators': config.ESTIMATOR_NUM, 'min_samples_leaf': config.MIN_SAMPLE_LEAVES,
                    'min_samples_split': config.MIN_SAMPLE_SPLIT}
    boosting_param = {'n_estimators': config.ESTIMATOR_NUM, 'learning_rate': config.LEARNING_RATE}
    # Grid search definition (one per classifier family)
    grid_searches = [
        GridSearchCV(DecisionTreeClassifier(class_weight='balanced', random_state=random_seed),
                     tree_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION),
        GridSearchCV(RandomForestClassifier(class_weight='balanced', n_jobs=job_num, random_state=random_seed),
                     forest_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION),
        GridSearchCV(ExtraTreesClassifier(n_jobs=job_num, class_weight='balanced', random_state=random_seed),
                     forest_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION),
        GridSearchCV(AdaBoostClassifier(base_estimator=DecisionTreeClassifier(class_weight='balanced',
                                                                              random_state=random_seed,
                                                                              max_depth=2),
                                        algorithm='SAMME.R', random_state=random_seed),
                     boosting_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION)
    ]
    # Fit each search on the train split, evaluate its best model on the test split.
    res = []
    for model in grid_searches:
        model.score_sample_weight = True
        model.fit(data_train, label_train)
        # Derive a short class name, e.g. 'RandomForestClassifier'.
        model_name = str(type(model.best_estimator_)).replace('<class \'', '').replace('\'>', '').split('.')[-1]
        predicted_label = model.best_estimator_.predict(data_test)
        t = get_metrics(label_test, predicted_label)
        t['model_name'] = model_name
        t['language'] = language
        t['repository'] = repo
        res.append(t)
    return res
def data_classification(language, data, label, random_seed=config.RANDOM_SEED, job_num=multiprocessing.cpu_count()):
    """
    Trains the grid-searched classifiers with nested cross-validation: the
    inner CV tunes hyper-parameters, the outer CV produces out-of-fold
    predictions for an unbiased evaluation.

    :param language: programming language
    :param data: input data
    :param label: input labels
    :param random_seed: the random seed
    :param job_num: the number of cores to use
    :return: dict mapping classifier name to its best params, accuracy,
             confusion matrix and classification report (the original built
             this dict but silently discarded it)
    """
    # CV splitters: inner for the grid searches, outer for evaluation.
    inner_cv = KFold(n_splits=config.FOLD_NUM, shuffle=True, random_state=random_seed)
    outer_cv = KFold(n_splits=config.FOLD_NUM, shuffle=True, random_state=random_seed)
    # Hyper-parameter grids
    tree_param = {'min_samples_leaf': config.MIN_SAMPLE_LEAVES, 'min_samples_split': config.MIN_SAMPLE_SPLIT,
                  'max_depth': config.TREE_MAX_DEPTH}
    forest_param = {'n_estimators': config.ESTIMATOR_NUM, 'min_samples_leaf': config.MIN_SAMPLE_LEAVES,
                    'min_samples_split': config.MIN_SAMPLE_SPLIT}
    boosting_param = {'n_estimators': config.ESTIMATOR_NUM, 'learning_rate': config.LEARNING_RATE}
    # Grid search definition (one per classifier family)
    grid_searches = [
        GridSearchCV(DecisionTreeClassifier(class_weight='balanced', random_state=random_seed),
                     tree_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION),
        GridSearchCV(RandomForestClassifier(class_weight='balanced', n_jobs=job_num, random_state=random_seed),
                     forest_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION),
        GridSearchCV(ExtraTreesClassifier(n_jobs=job_num, class_weight='balanced', random_state=random_seed),
                     forest_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION),
        GridSearchCV(AdaBoostClassifier(base_estimator=DecisionTreeClassifier(class_weight='balanced',
                                                                              random_state=random_seed,
                                                                              max_depth=2),
                                        algorithm='SAMME.R', random_state=random_seed),
                     boosting_param, cv=inner_cv, n_jobs=job_num, scoring=config.SCORING_FUNCTION)
    ]
    # Fitting the classifiers
    classification_results = {}
    for model in grid_searches:
        model.score_sample_weight = True
        model.fit(data, label)
        # Derive a short class name, e.g. 'RandomForestClassifier'.
        model_name = str(type(model.best_estimator_)).replace('<class \'', '').replace('\'>', '').split('.')[-1]
        model_best_param = model.best_params_
        # Out-of-fold predictions from the outer CV give an unbiased report.
        predicted_label = cross_val_predict(model.best_estimator_, X=data, y=label, cv=outer_cv, n_jobs=job_num)
        model_classification_report = classification_report(label, predicted_label)
        classification_results[model_name] = {
            'best_params': model_best_param,
            'accuracy': accuracy_score(label, predicted_label),
            'confusion_matrix': confusion_matrix(label, predicted_label).tolist(),
            'classification_report': model_classification_report,
        }
        print(model_classification_report)
    ## Save the classification result
    #store_classification_result(model_name, language, model_classification_report, classification_results)
    return classification_results
def get_best_decision_tree(data, label, random_seed=config.RANDOM_SEED, job_num=multiprocessing.cpu_count()):
    """
    Grid-search a balanced decision tree over the configured hyper-parameter
    space and return the best fitted estimator.

    :param data: feature matrix
    :param label: target labels
    :param random_seed: seed for both the CV splitter and the tree
    :param job_num: number of parallel jobs for the search
    :return: the best fitted DecisionTreeClassifier
    """
    cv_splitter = KFold(n_splits=config.FOLD_NUM, shuffle=True, random_state=random_seed)
    search_space = {
        'min_samples_leaf': config.MIN_SAMPLE_LEAVES,
        'min_samples_split': config.MIN_SAMPLE_SPLIT,
        'max_depth': config.TREE_MAX_DEPTH,
    }
    base_tree = DecisionTreeClassifier(class_weight='balanced', random_state=random_seed)
    search = GridSearchCV(base_tree, search_space, cv=cv_splitter, n_jobs=job_num,
                          scoring=config.SCORING_FUNCTION)
    search.score_sample_weight = True
    search.fit(data, label)
    return search.best_estimator_
def get_feature_importance_by_model(model):
    """
    Return a fitted model's feature-importance vector.

    :param model: a fitted estimator exposing ``feature_importances_``
    :return: the per-feature importance values
    """
    importances = model.feature_importances_
    return importances
def get_feature_set(data):
    """
    Split the raw feature matrix into its named feature groups.

    Single-column groups are reshaped to (n, 1) column vectors; multi-column
    groups are collapsed to one component with an incremental PCA.

    :param data: 2-D array whose columns follow the fixed feature layout
    :return: (feature_sets, <nine feature-group arrays in the listed order>)
    """
    def _column(idx):
        # One raw column as an (n, 1) vector.
        return data[:, idx].reshape(-1, 1)

    def _condense(lo, hi):
        # Reduce columns [lo, hi) to a single PCA component.
        return IncrementalPCA(n_components=1).fit_transform(data[:, lo:hi])

    parallel_changes = _column(0)
    commit_num = _column(1)
    commit_density = _column(2)
    file_edits = _condense(3, 8)
    line_edits = _condense(8, 10)
    dev_num = _column(10)
    keywords = _condense(11, 23)
    message = _condense(23, 27)
    duration = _column(27)
    feature_sets = ['prl_changes', 'commit_num', 'commit_density', 'file_edits', 'line_edits', 'dev_num',
                    'keywords', 'message', 'duration']
    return (feature_sets, parallel_changes, commit_num, commit_density, file_edits,
            line_edits, dev_num, keywords, message, duration)
def save_feature_correlation(language, data, label):
    """
    Append the Spearman correlation of each feature group with the label to
    the per-language correlation file.

    :param language: the programming language (used in the file name)
    :param data: the feature matrix
    :param label: the labels
    """
    feature_sets, parallel_changes, commit_num, commit_density, file_edits, line_edits, dev_num, keywords, message\
        , duration = get_feature_set(data)
    features = [parallel_changes, commit_num, commit_density, file_edits, line_edits, dev_num, keywords, message
                , duration]
    out_path = '{}feature_correlation_{}.txt'.format(config.PREDICTION_RESULT_PATH, language)
    # Open once in append mode; the original reopened (and leaked) the file
    # handle for every feature.
    with open(out_path, 'a') as out_file:
        for i, feature in enumerate(features):
            corr, p_value = spearmanr(feature, label)
            out_file.write('{}:\t\t{} \t {}\n'.format(feature_sets[i], round(corr, 2), round(p_value, 2)))
def save_feature_correlation_dict(data, label):
    """
    Compute the Spearman correlation of each feature group with the label.

    :param data: the feature matrix
    :param label: the labels
    :return: dict of '<feature>_corr' / '<feature>_p_value' entries; may be
             partial (or empty) if the correlation computation fails
    """
    # (The original first assigned a feature_sets literal that was immediately
    # overwritten by the unpacking below -- dead code, removed.)
    feature_sets, parallel_changes, commit_num, commit_density, file_edits, line_edits, dev_num, keywords, message\
        , duration = get_feature_set(data)
    features = [parallel_changes, commit_num, commit_density, file_edits, line_edits, dev_num, keywords, message
                , duration]
    correlation = {}
    try:
        for i, feature in enumerate(features):
            corr, p_value = spearmanr(feature, label)
            correlation[feature_sets[i] + '_corr'] = corr
            correlation[feature_sets[i] + '_p_value'] = p_value
    except Exception:
        # Best effort: keep whatever was computed before the failure. The
        # original used a bare except plus a return inside finally, which
        # also swallowed KeyboardInterrupt/SystemExit.
        pass
    return correlation
def save_feature_importance(repo_name, data, label):
    """
    Train the best decision tree on the condensed feature groups and return
    its feature importances (despite the name, nothing is written to disk).

    :param repo_name: repository name (unused here; kept for the caller's API)
    :param data: pandas DataFrame of raw features (converted via .values)
    :param label: the labels
    :return: per-feature-group importance values of the best decision tree
    """
    data = data.values
    feature_sets, parallel_changes, commit_num, commit_density, file_edits, line_edits, dev_num, keywords, message, duration \
        = get_feature_set(data)
    # Recombine the condensed groups into one matrix, one column per group.
    feature_data = np.concatenate((parallel_changes, commit_num, commit_density, file_edits, line_edits,
                                   dev_num, keywords, message, duration), axis=1)
    return get_feature_importance_by_model(get_best_decision_tree(feature_data, label))
def baseline_classification(language, data, label):
    """
    Run the classification pipeline on the baseline feature alone
    (the parallel-changes group).

    :param language: The programming language
    :param data: The data
    :param label: The labels
    """
    # get_feature_set returns (names, parallel_changes, ...); the baseline
    # uses only the parallel_changes group at index 1.
    feature_groups = get_feature_set(data)
    parallel_changes = feature_groups[1]
    data_classification(language + '__baseline', parallel_changes, label)
############################################
############################################
from sklearn import metrics
import autosklearn.classification
from sklearn.svm import SVC
def get_metrics(label_test, predicted_labels):
    """
    Compute the standard evaluation metrics for a set of binary predictions
    (1 == conflict, 0 == no conflict).

    :param label_test: true labels
    :param predicted_labels: predicted labels
    :return: dict mapping metric name to its value
    """
    conflict_count = len([i for i in label_test if i == 1])
    # Insertion order matches the original result dict.
    return {
        'roc_curve': metrics.roc_curve(label_test, predicted_labels),
        'confusion_matrix': metrics.confusion_matrix(label_test, predicted_labels),
        'classification_report': metrics.classification_report(label_test, predicted_labels),
        'accuracy_score': metrics.accuracy_score(label_test, predicted_labels),
        'roc_auc_score': metrics.roc_auc_score(label_test, predicted_labels),
        'precision_score_conflict': metrics.precision_score(label_test, predicted_labels),
        'precision_score_not_conflict': metrics.precision_score(label_test, predicted_labels, pos_label=0),
        'precision_score_average': metrics.precision_score(label_test, predicted_labels, average='weighted'),
        'recall_score_conflict': metrics.recall_score(label_test, predicted_labels),
        'recall_score_not_conflict': metrics.recall_score(label_test, predicted_labels, pos_label=0),
        'recall_score_average': metrics.recall_score(label_test, predicted_labels, average='weighted'),
        'f1_score_conflict': metrics.f1_score(label_test, predicted_labels),
        'f1_score_not_conflict': metrics.f1_score(label_test, predicted_labels, pos_label=0),
        'f1_score_average': metrics.f1_score(label_test, predicted_labels, average='weighted'),
        'conflict_rate': conflict_count / len(label_test),
    }
def get_decision_tree_result(data_train, label_train, data_test, label_test):
    """Fit a balanced decision tree on the train split and return its test metrics."""
    model = DecisionTreeClassifier(class_weight='balanced')
    model.fit(data_train, label_train)
    return get_metrics(label_test, model.predict(data_test))
def get_random_forest_result(data_train, label_train, data_test, label_test):
    """Fit a balanced random forest on the train split and return its test metrics."""
    model = RandomForestClassifier(class_weight='balanced')
    model.fit(data_train, label_train)
    return get_metrics(label_test, model.predict(data_test))
def get_svm_result(data_train, label_train, data_test, label_test):
    """Fit a balanced linear SVM on the train split and return its test metrics."""
    model = SVC(C=1.0, kernel='linear', class_weight='balanced')
    model.fit(data_train, label_train)
    return get_metrics(label_test, model.predict(data_test))
def get_auto_scikit_result(data_train, label_train, data_test, label_test):
    """Run auto-sklearn (1 hour budget, ROC-AUC objective) and score the test split.

    Returns the metrics dictionary produced by get_metrics(), augmented with
    auto-sklearn's model listing and run statistics.
    """
    automl = autosklearn.classification.AutoSklearnClassifier(
        time_left_for_this_task=60 * 60,
        per_run_time_limit=300,
        tmp_folder='/tmp/autosklearn_sequential_example_tmp1111',
        output_folder='/tmp/autosklearn_sequential_example_out1111',
    )
    automl.fit(data_train, label_train, metric=autosklearn.metrics.roc_auc)
    result = get_metrics(label_test, automl.predict(data_test))
    result['show_models'] = automl.show_models()
    result['sprint_statistics'] = automl.sprint_statistics()
    return result
if __name__ == "__main__":
    # Logging
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s in %(threadName)s - %(asctime)s by %(name)-12s : %(message)s',
                        datefmt='%y-%m-%d %H:%M:%S')
    logging.info('Train/test of merge conflict prediction')
    # Data classification
    # Each repository contributes a data_* / label_* CSV pair under PREDICTION_CSV_PATH.
    data_files = glob.glob(config.PREDICTION_CSV_PATH + 'data_*')
    label_files = glob.glob(config.PREDICTION_CSV_PATH + 'label_*')
    # Repository name is the 4th '_'-separated token of the basename — assumes
    # file names shaped like data_prediction_x_<repo>.csv; TODO confirm.
    repos_set = [files.split('/')[-1].split('_')[3].replace('.csv', '') for files in data_files]
    classification_result = []
    feature_importance = []
    languages = []
    corr = []
    for ind, data_path in enumerate(data_files):
        # Sort chronologically so the split below is a time-based train/test split.
        data_tmp = pd.read_csv(data_path).sort_values(by=['merge_commit_date'])
        label_tmp = pd.read_csv(data_path.replace('data_prediction', 'label_prediction')).sort_values(by=['merge_commit_date'])
        data_tmp = data_tmp.drop('merge_commit_date', axis=1)
        label_tmp = label_tmp.drop('merge_commit_date', axis=1)
        # Correlation
        try:
            tmp_corr = save_feature_correlation_dict(data_tmp.to_numpy(), label_tmp.to_numpy())
            if len(tmp_corr) > 0:
                # NOTE(review): 'langugae' is a typo, but it is a persisted CSV
                # column name — renaming it would break downstream readers.
                tmp_corr['langugae'] = repo_lang.get_lang(repos_set[ind].lower())
                tmp_corr['repository'] = repos_set[ind]
                corr.append(tmp_corr)
        except:
            # Best-effort: a repository whose correlation computation fails is
            # silently skipped. NOTE(review): bare except also hides real bugs.
            pass
        # Correlation-only debug mode: this unconditional continue skips the
        # classification code below for every repository (see exit() after the loop).
        continue
        # ---- everything from here to the end of the loop is currently unreachable ----
        # Chronological split: first TRAIN_RATE of merges train, the rest test.
        # NOTE(review): iloc[train_ind:-1] drops the final row — presumably
        # unintentional; confirm before relying on the test-split size.
        train_ind = int(data_tmp.shape[0] * config.TRAIN_RATE)
        data_train = data_tmp.iloc[0:train_ind, :]
        data_test = data_tmp.iloc[train_ind:-1, :]
        label_train = label_tmp.iloc[0:train_ind, :]['is_conflict'].tolist()
        label_test = label_tmp.iloc[train_ind:-1, :]['is_conflict'].tolist()
        # Sanity filters: skip repositories whose data is unusable for evaluation.
        if len(label_test) != data_test.shape[0]:
            print('Inconsistent data: {}'.format(repos_set[ind]))
            continue
        if data_test.shape[0] < 50:
            print('Not enough merge scenarios: {}'.format(repos_set[ind]))
            continue
        if len(set(label_test)) != 2 or len(set(label_train)) != 2:
            print('One class is missed: {}'.format(repos_set[ind]))
            continue
        if len([i for i in label_test if i == 1]) < 10:
            print('Nor enough conflicting merge in the test batch for evaluation: {}'.format(repos_set[ind]))
            continue
        # k = k + data_tmp.shape[0]
        try:
            res = data_classification_wo_cv(repo_lang.get_lang(repos_set[ind].lower()), repos_set[ind] ,data_train, label_train, data_test, label_test)
            classification_result = classification_result + res
            feature_importance.append(save_feature_importance(repos_set[ind], data_train, label_train))
            languages.append(repo_lang.get_lang(repos_set[ind].lower()))
        except Exception as e:
            print('Error - {}'.format(e))
            continue
    # Persist the per-repository correlations collected above.
    corr_df = pd.DataFrame(corr)
    corr_df.to_csv(f'corr_{config.RANDOM_SEED}.csv')
    # Debug shortcut: stop after writing the correlation CSV. Everything below
    # is unreachable until this exit() is removed.
    exit()
    # Feature importance
    feature_importance = pd.DataFrame(feature_importance, columns=['prl_changes', 'commit_num', 'commit_density', 'file_edits', 'line_edits', 'dev_num',
                                                                   'keywords', 'message', 'duration'])
    feature_importance['language'] = pd.Series(languages)
    feature_importance['repository'] = pd.Series(repos_set)
    # NOTE(review): dropna() returns a new frame; the result is discarded here.
    feature_importance.dropna()
    feature_importance.to_csv(f'feature_importance_{config.RANDOM_SEED}.csv')
    feature_importance_summery = feature_importance.drop('repository', axis=1).groupby('language').agg('median')
    feature_importance_summery.to_csv(f'feature_importance_summery_{config.RANDOM_SEED}.csv')
    # Classification
<gh_stars>1-10
##########################################################################
#
# Copyright (c) 2012, <NAME>. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import random
import IECore
import Gaffer
import GafferScene
import GafferSceneTest
class PathMatcherTest( GafferSceneTest.SceneTestCase ) :
	@staticmethod
	def generatePaths( seed, depthRange, numChildrenRange ) :
		"""Return a list of randomly generated scene paths (InternedStringVectorData).

		seed seeds the random module so results are reproducible. depthRange and
		numChildrenRange are (min, max) tuples passed to random.randint() to bound
		the hierarchy depth and the number of children at each branch point.
		"""

		# Word pools combined as adjective + noun + child index to build names.
		nouns = [
			"Ball", "Building", "Car", "Tree", "Rampart", "Head", "Arm",
			"Window", "Door", "Trailer", "Light", "FlockOfBirds", "Herd", "Sheep",
			"Cow", "Wing", "Engine", "Mast", "Rock", "Road", "Sign",
		]
		adjectives = [
			"big", "red", "metallic", "left", "right", "top", "bottom", "wooden",
			"front", "back", "lower", "upper", "magnificent", "hiRes", "loRes",
		]
		paths = []
		# Recursive builder. The mutable default is safe here: parent is copied
		# before appending and never mutated in place.
		def buildWalk( parent=IECore.InternedStringVectorData(), depth=1 ) :
			# Stop at a random depth within depthRange (re-rolled per branch).
			if depth > random.randint( *depthRange ) :
				return
			for i in range( 0, random.randint( *numChildrenRange ) ) :
				path = parent.copy()
				path.append( random.choice( adjectives ) + random.choice( nouns ) + str( i ) )
				paths.append( path )
				buildWalk( path, depth + 1 )
		# Seeding here makes every call with the same arguments deterministic.
		random.seed( seed )
		buildWalk()
		return paths
def testMatch( self ) :
m = GafferScene.PathMatcher( [ "/a", "/red", "/b/c/d" ] )
for path, result in [
( "/a", GafferScene.Filter.Result.ExactMatch ),
( "/red", GafferScene.Filter.Result.ExactMatch ),
( "/re", GafferScene.Filter.Result.NoMatch ),
( "/redThing", GafferScene.Filter.Result.NoMatch ),
( "/b/c/d", GafferScene.Filter.Result.ExactMatch ),
( "/c", GafferScene.Filter.Result.NoMatch ),
( "/a/b", GafferScene.Filter.Result.AncestorMatch ),
( "/blue", GafferScene.Filter.Result.NoMatch ),
( "/b/c", GafferScene.Filter.Result.DescendantMatch ),
] :
self.assertEqual( m.match( path ), result )
def testLookupScaling( self ) :
# this test provides a useful means of measuring performance when
# working on the PatchMatcher algorithm. it tests matchers
# for each of two different hierarchies :
#
# * a deep hierarchy with relatively few children at each branch point
# * a shallow hierarchy with large numbers of children at each branch point
#
# the tests build a matcher, and then assert that every path in the hierarchy is
# matched appropriately. uncomment the timers to get useful information printed out.
match = GafferScene.Filter.Result.ExactMatch
# deep hierarchy
paths = self.generatePaths( seed = 10, depthRange = ( 3, 14 ), numChildrenRange = ( 2, 6 ) )
t = IECore.Timer()
matcher = GafferScene.PathMatcher( paths )
#print "BUILD DEEP", t.stop()
t = IECore.Timer()
for path in paths :
self.assertTrue( matcher.match( path ) & match )
#print "LOOKUP DEEP", t.stop()
# shallow hierarchy
paths = self.generatePaths( seed = 10, depthRange = ( 2, 2 ), numChildrenRange = ( 500, 1000 ) )
t = IECore.Timer()
matcher = GafferScene.PathMatcher( paths )
#print "BUILD SHALLOW", t.stop()
t = IECore.Timer()
for path in paths :
self.assertTrue( matcher.match( path ) & match )
#print "LOOKUP SHALLOW", t.stop()
def testDefaultConstructor( self ) :
m = GafferScene.PathMatcher()
self.assertEqual( m.match( "/" ), GafferScene.Filter.Result.NoMatch )
def testWildcards( self ) :
f = GafferScene.PathFilter()
f["paths"].setValue(
IECore.StringVectorData( [
"/a",
"/red*",
"/green*Bloke*",
"/somewhere/over/the/*",
"/somewhere/over/the/*/skies/are/blue",
] )
)
for path, result in [
( "/a", f.Result.ExactMatch ),
( "/redBoots", f.Result.ExactMatch ),
( "/red", f.Result.ExactMatch ),
( "/redWellies", f.Result.ExactMatch ),
( "/redWellies/in/puddles", f.Result.AncestorMatch ),
( "/greenFatBloke", f.Result.ExactMatch ),
( "/greenBloke", f.Result.ExactMatch ),
( "/greenBlokes", f.Result.ExactMatch ),
( "/somewhere/over/the/rainbow", f.Result.ExactMatch | f.Result.DescendantMatch ),
( "/somewhere/over/the", f.Result.DescendantMatch ),
( "/somewhere/over", f.Result.DescendantMatch ),
( "/somewhere", f.Result.DescendantMatch ),
( "/somewhere/over/the/rainbow/skies/are/blue", f.Result.ExactMatch | f.Result.AncestorMatch ),
( "/somewhere/over/the/rainbow/skies/are", f.Result.DescendantMatch | f.Result.AncestorMatch ),
( "/somewhere/over/the/astonExpressway/skies/are", f.Result.DescendantMatch | f.Result.AncestorMatch ),
( "/somewhere/over/the/astonExpressway/skies/are/blue", f.Result.ExactMatch | f.Result.AncestorMatch ),
( "/somewhere/over/the/astonExpressway/skies/are/grey", f.Result.AncestorMatch ),
] :
c = Gaffer.Context()
c["scene:path"] = IECore.InternedStringVectorData( path[1:].split( "/" ) )
with c :
self.assertEqual( f["out"].getValue(), int( result ) )
def testWildcardsWithSiblings( self ) :
f = GafferScene.PathFilter()
f["paths"].setValue(
IECore.StringVectorData( [
"/a/*/b",
"/a/a*/c",
] )
)
for path, result in [
( "/a/aThing/c", f.Result.ExactMatch ),
( "/a/aThing/b", f.Result.ExactMatch ),
] :
c = Gaffer.Context()
c["scene:path"] = IECore.InternedStringVectorData( path[1:].split( "/" ) )
with c :
self.assertEqual( f["out"].getValue(), int( result ) )
def testRepeatedWildcards( self ) :
f = GafferScene.PathFilter()
f["paths"].setValue(
IECore.StringVectorData( [
"/a/**s",
] )
)
c = Gaffer.Context()
c["scene:path"] = IECore.InternedStringVectorData( [ "a", "s" ] )
with c :
self.assertEqual( f["out"].getValue(), int( GafferScene.Filter.Result.ExactMatch ) )
def testEllipsis( self ) :
f = GafferScene.PathFilter()
f["paths"].setValue(
IECore.StringVectorData( [
"/a/.../b*",
"/a/c",
] )
)
for path, result in [
( "/a/ball", f.Result.ExactMatch | f.Result.DescendantMatch ),
( "/a/red/ball", f.Result.ExactMatch | f.Result.DescendantMatch ),
( "/a/red/car", f.Result.DescendantMatch ),
( "/a/big/red/ball", f.Result.ExactMatch | f.Result.DescendantMatch | f.Result.AncestorMatch ),
( "/a/lovely/shiny/bicyle", f.Result.ExactMatch | f.Result.DescendantMatch ),
( "/a/c", f.Result.ExactMatch | f.Result.DescendantMatch ),
( "/a/d", f.Result.DescendantMatch ),
( "/a/anything", f.Result.DescendantMatch ),
( "/a/anything/really", f.Result.DescendantMatch ),
( "/a/anything/at/all", f.Result.DescendantMatch ),
( "/b/anything/at/all", f.Result.NoMatch ),
] :
c = Gaffer.Context()
c["scene:path"] = IECore.InternedStringVectorData( path[1:].split( "/" ) )
with c :
self.assertEqual( f["out"].getValue(), int( result ) )
def testEllipsisWithMultipleBranches( self ) :
f = GafferScene.PathFilter()
f["paths"].setValue(
IECore.StringVectorData( [
"/a/.../b*",
"/a/.../c*",
] )
)
for path, result in [
( "/a/ball", f.Result.ExactMatch | f.Result.DescendantMatch ),
( "/a/red/ball", f.Result.ExactMatch | f.Result.DescendantMatch ),
( "/a/red/car", f.Result.ExactMatch | f.Result.DescendantMatch ),
( "/a/big/red/ball", f.Result.ExactMatch | f.Result.DescendantMatch | f.Result.AncestorMatch ),
( "/a/lovely/shiny/bicyle", f.Result.ExactMatch | f.Result.DescendantMatch ),
( "/a/c", f.Result.ExactMatch | f.Result.DescendantMatch ),
( "/a/d", f.Result.DescendantMatch ),
( "/a/anything", f.Result.DescendantMatch ),
( "/a/anything/really", f.Result.DescendantMatch ),
( "/a/anything/at/all", f.Result.DescendantMatch ),
( "/b/anything/at/all", f.Result.NoMatch ),
] :
c = Gaffer.Context()
c["scene:path"] = IECore.InternedStringVectorData( path[1:].split( "/" ) )
with c :
self.assertEqual( f["out"].getValue(), int( result ) )
def testEllipsisAsTerminator( self ) :
f = GafferScene.PathFilter()
f["paths"].setValue(
IECore.StringVectorData( [
"/a/...",
] )
)
for path, result in [
( "/a", f.Result.ExactMatch | f.Result.DescendantMatch ),
( "/a/ball", f.Result.ExactMatch | f.Result.DescendantMatch | f.Result.AncestorMatch ),
( "/a/red/car", f.Result.ExactMatch | f.Result.DescendantMatch | f.Result.AncestorMatch ),
( "/a/red/car/rolls", f.Result.ExactMatch | f.Result.DescendantMatch | f.Result.AncestorMatch ),
( "/a/terminating/ellipsis/matches/everything/below/it", f.Result.ExactMatch | f.Result.DescendantMatch | f.Result.AncestorMatch ),
] :
c = Gaffer.Context()
c["scene:path"] = IECore.InternedStringVectorData( path[1:].split( "/" ) )
with c :
self.assertEqual( f["out"].getValue(), int( result ) )
def testCopyConstructorAppearsDeep( self ) :
m = GafferScene.PathMatcher( [ "/a" ] )
self.assertEqual( m.match( "/a" ), GafferScene.Filter.Result.ExactMatch )
m2 = GafferScene.PathMatcher( m )
self.assertEqual( m2.match( "/a" ), GafferScene.Filter.Result.ExactMatch )
m.clear()
self.assertEqual( m.match( "/a" ), GafferScene.Filter.Result.NoMatch )
self.assertEqual( m2.match( "/a" ), GafferScene.Filter.Result.ExactMatch )
def testAddAndRemovePaths( self ) :
m = GafferScene.PathMatcher()
m.addPath( "/a" )
m.addPath( "/a/b" )
self.assertEqual( m.match( "/a" ), GafferScene.Filter.Result.ExactMatch | GafferScene.Filter.Result.DescendantMatch )
self.assertEqual( m.match( "/a/b" ), GafferScene.Filter.Result.ExactMatch | GafferScene.Filter.Result.AncestorMatch )
m.removePath( "/a" )
self.assertEqual( m.match( "/a" ), GafferScene.Filter.Result.DescendantMatch )
self.assertEqual( m.match( "/a/b" ), GafferScene.Filter.Result.ExactMatch )
m.removePath( "/a/b" )
self.assertEqual( m.match( "/a" ), GafferScene.Filter.Result.NoMatch )
self.assertEqual( m.match( "/a/b" ), GafferScene.Filter.Result.NoMatch )
def testRemovePathRemovesIntermediatePaths( self ) :
m = GafferScene.PathMatcher()
m.addPath( "/a/b/c" )
self.assertEqual( m.match( "/a" ), GafferScene.Filter.Result.DescendantMatch )
self.assertEqual( m.match( "/a/b" ), GafferScene.Filter.Result.DescendantMatch )
self.assertEqual( m.match( "/a/b/c" ), GafferScene.Filter.Result.ExactMatch )
m.removePath( "/a/b/c" )
self.assertEqual( m.match( "/a" ), GafferScene.Filter.Result.NoMatch )
self.assertEqual( m.match( "/a/b" ), GafferScene.Filter.Result.NoMatch )
self.assertEqual( m.match( "/a/b/c" ), GafferScene.Filter.Result.NoMatch )
def testRemoveEllipsis( self ) :
m = GafferScene.PathMatcher()
m.addPath( "/a/.../b" )
self.assertEqual( m.match( "/a" ), GafferScene.Filter.Result.DescendantMatch )
self.assertEqual( m.match( "/a/c" ), GafferScene.Filter.Result.DescendantMatch )
self.assertEqual( m.match( "/a/c/b" ), GafferScene.Filter.Result.ExactMatch | GafferScene.Filter.Result.DescendantMatch )
m.removePath( "/a/.../b" )
self.assertEqual( m.match( "/a" ), GafferScene.Filter.Result.NoMatch )
self.assertEqual( m.match( "/a/c" ), GafferScene.Filter.Result.NoMatch )
self.assertEqual( m.match( "/a/c/b" ), GafferScene.Filter.Result.NoMatch )
def testAddPathReturnValue( self ) :
m = GafferScene.PathMatcher()
self.assertEqual( m.addPath( "/" ), True )
self.assertEqual( m.addPath( "/a/b" ), True )
self.assertEqual( m.addPath( "/a/b" ), False )
self.assertEqual( m.addPath( "/a" ), True )
self.assertEqual( m.addPath( "/" ), False )
m = GafferScene.PathMatcher()
self.assertEqual( m.addPath( "/a/b/c" ), True )
self.assertEqual( m.addPath( "/a/b/c" ), False )
self.assertEqual( m.addPath( "/" ), True )
self.assertEqual( m.addPath( "/*" ), True )
self.assertEqual( m.addPath( "/*" ), False )
self.assertEqual( m.addPath( "/..." ), True )
self.assertEqual( m.addPath( "/..." ), False )
self.assertEqual( m.addPath( "/a/b/c/d" ), True )
self.assertEqual( m.addPath( "/a/b/c/d" ), False )
m.removePath( "/a/b/c/d" )
self.assertEqual( m.addPath( "/a/b/c/d" ), True )
self.assertEqual( m.addPath( "/a/b/c/d" ), False )
def testRemovePathReturnValue( self ) :
m = GafferScene.PathMatcher()
self.assertEqual( m.removePath( "/" ), False )
m.addPath( "/" )
self.assertEqual( m.removePath( "/" ), True )
self.assertEqual( m.removePath( "/" ), False )
self.assertEqual( m.removePath( "/a/b/c" ), False )
m.addPath( "/a/b/c" )
self.assertEqual( m.removePath( "/a/b/c" ), True )
self.assertEqual( m.removePath( "/a/b/c" ), False )
def testEquality( self ) :
m1 = GafferScene.PathMatcher()
m2 = GafferScene.PathMatcher()
self.assertEqual( m1, m2 )
m1.addPath( "/a" )
self.assertNotEqual( m1, m2 )
m2.addPath( "/a" )
self.assertEqual( m1, m2 )
m2.addPath( "/a/b" )
self.assertNotEqual( m1, m2 )
m1.addPath( "/a/b" )
self.assertEqual( m1, m2 )
m1.addPath( "/a/b/.../c" )
self.assertNotEqual( m1, m2 )
m2.addPath( "/a/b/.../c" )
self.assertEqual( m1, m2 )
m2.addPath( "/c*" )
self.assertNotEqual( m1, m2 )
m1.addPath( "/c*" )
self.assertEqual( m1, m2 )
def testPaths( self ) :
m = GafferScene.PathMatcher()
self.assertEqual( m.paths(), [] )
m.addPath( | |
= rest_get(host, secure_boot_uri, None, iLO_loginname, iLO_password)
# if the BIOS doesn't support PATCH, go get the Settings, which should
if not operation_allowed(headers, 'PATCH'): # this is GET-only
secure_boot_uri = secure_boot_settings['links']['Settings']['href']
status, headers, boot_settings = rest_get(host, secure_boot_uri, None, iLO_loginname, iLO_password)
assert(operation_allowed(headers, 'PATCH')) # this allows PATCH
# we don't need to PATCH back everything, just the one property we want to change
new_secure_boot_settings = dict()
new_secure_boot_settings['SecureBootEnable'] = secure_boot_enable
# perform the patch
print('PATCH ' + json.dumps(new_secure_boot_settings) + ' to ' + secure_boot_uri)
status, headers, response = rest_patch(host, secure_boot_uri, None, new_secure_boot_settings, iLO_loginname, iLO_password)
print('PATCH response = ' + str(status))
print_extended_error(response)
assert(status < 300)
# point made...quit
break
# noinspection PyPep8Naming
def ex4_bios_revert_default(host, iLO_loginname, iLO_password, default_overrides=None):
    """Revert the BIOS settings of the first system to factory defaults.

    default_overrides: optional dict of BIOS properties to apply on top of the
    defaults (merged into the PUT payload). NOTE(review): because the falsy
    check below treats None and {} the same, passing an explicit empty dict is
    equivalent to passing nothing.
    """
    if not default_overrides: default_overrides = {}
    print('EXAMPLE 4: Revert BIOS Settings to default')
    # for each system in the systems collection at /rest/v1/Systems
    for status, headers, system, memberuri in collection(host, '/rest/v1/Systems', None, iLO_loginname, iLO_password):
        # verify expected type
        # hint: don't limit to version 0 here as we will rev to 1.0 at some point hopefully with minimal changes
        assert(get_type(system) == 'ComputerSystem.0' or get_type(system) == 'ComputerSystem.1')
        # find the BIOS URI (an HP OEM extension; not all systems expose it)
        if 'links' not in system['Oem']['Hp'] or 'BIOS' not in system['Oem']['Hp']['links']:
            print('\tBIOS Settings resource or feature is not supported on this system')
            return
        bios_uri = system['Oem']['Hp']['links']['BIOS']['href']
        # get the BIOS object
        status, headers, bios_settings = rest_get(host, bios_uri, None, iLO_loginname, iLO_password)
        # if the BIOS doesn't support PUT, go get the Settings, which should
        # (the main resource reflects current settings; the Settings resource
        # holds pending changes applied on next reboot)
        if not operation_allowed(headers, 'PUT'):  # this is GET-only
            if 'Settings' not in bios_settings['links']:
                print('No BIOS settings resources allow PUT')
                return
            bios_uri = bios_settings['links']['Settings']['href']
            status, headers, bios_settings = rest_get(host, bios_uri, None, iLO_loginname, iLO_password)
            assert(operation_allowed(headers, 'PUT'))  # this allows PUT
        # we don't need to PUT back everything, just the one property we want to change
        new_bios_settings = dict()
        # BaseConfig 'default' asks the firmware to reset everything to defaults
        new_bios_settings['BaseConfig'] = 'default'
        # preserve the Type property from the existing BIOS settings to avoid an error
        new_bios_settings['Type'] = bios_settings['Type']
        # add in any caller-supplied override properties
        for override in default_overrides:
            new_bios_settings[override] = default_overrides[override]
        # perform the PUT (the comment in the original said "patch", but this is a PUT)
        print('PUT ' + json.dumps(new_bios_settings) + ' to ' + bios_uri)
        status, headers, response = rest_put(host, bios_uri, None, new_bios_settings, iLO_loginname, iLO_password)
        print('PUT response = ' + str(status))
        print_extended_error(response)
        assert(status < 300)
        # point made...quit
        break
# noinspection PyPep8Naming
def ex5_change_boot_order(host, iLO_loginname, iLO_password, bios_password):
    """Demonstrate changing the persistent (UEFI) boot order on the first system.

    bios_password: the BIOS setup password, or a falsy value if none is set.
    When supplied, it is sent as an uppercase-hex SHA-256 digest in the
    X-HPRESTFULAPI-AuthToken request header as required by the iLO RESTful API.
    """
    print('EXAMPLE 5: Change Boot Order (UEFI)')
    # for each system in the systems collection at /rest/v1/Systems
    for status, headers, system, memberuri in collection(host, '/rest/v1/Systems', None, iLO_loginname, iLO_password):
        # verify expected type
        # hint: don't limit to version 0 here as we will rev to 1.0 at some point hopefully with minimal changes
        assert(get_type(system) == 'ComputerSystem.0' or get_type(system) == 'ComputerSystem.1')
        # find the BIOS URI (an HP OEM extension; not all systems expose it)
        if 'links' not in system['Oem']['Hp'] or 'BIOS' not in system['Oem']['Hp']['links']:
            print('\tBIOS Settings resource or feature is not supported on this system')
            return
        bios_uri = system['Oem']['Hp']['links']['BIOS']['href']
        # get the BIOS object
        status, headers, bios_settings = rest_get(host, bios_uri, None, iLO_loginname, iLO_password)
        # get the BOOT object linked from the BIOS resource
        if 'Boot' not in bios_settings['links']:
            print('\t"links" section in Bios settings does not have a Boot order resource')
            return
        boot_uri = bios_settings['links']['Boot']['href']
        status, headers, boot_settings = rest_get(host, boot_uri, None, iLO_loginname, iLO_password)
        # if the BIOS doesn't support PATCH, go get the Settings, which should
        # (pending-settings resource applied on next reboot)
        if not operation_allowed(headers, 'PATCH'):  # this is GET-only
            boot_uri = boot_settings['links']['Settings']['href']
            status, headers, boot_settings = rest_get(host, boot_uri, None, iLO_loginname, iLO_password)
            assert(operation_allowed(headers, 'PATCH'))  # this allows PATCH
        # we don't need to PATCH back everything, just the one property we want to change
        new_boot_settings = dict()
        new_boot_settings['PersistentBootConfigOrder'] = boot_settings['PersistentBootConfigOrder']
        # TODO - rearrange new_boot_settings['PersistentBootConfigOrder'] with the desired order
        # supply the BIOS setup password hash, if a password is configured
        request_headers = dict()
        if bios_password:
            import hashlib  # local import: only needed on this rarely-taken path
            # iLO expects the SHA-256 digest of the BIOS password, uppercase hex
            bios_password_hash = hashlib.sha256(bios_password.encode()).hexdigest().upper()
            request_headers['X-HPRESTFULAPI-AuthToken'] = bios_password_hash
        # perform the patch
        print('PATCH ' + json.dumps(new_boot_settings) + ' to ' + boot_uri)
        status, headers, response = rest_patch(host, boot_uri, request_headers, new_boot_settings, iLO_loginname, iLO_password)
        print('PATCH response = ' + str(status))
        print_extended_error(response)
        assert(status < 300)
        # point made...quit
        break
# noinspection PyPep8Naming
def ex6_change_temporary_boot_order(host, boottarget, iLO_loginname, iLO_password):
    """Set a one-time / temporary boot override on the first system found."""
    print('EXAMPLE 6: Change temporary boot order (one time boot or temporary override)')
    # walk the systems collection at /rest/v1/Systems; only the first member is acted upon
    for status, headers, system, memberuri in collection(host, '/rest/v1/Systems', None, iLO_loginname, iLO_password):
        # accept both schema versions — 1.0 is expected to land with minimal changes
        assert(get_type(system) == 'ComputerSystem.0' or get_type(system) == 'ComputerSystem.1')
        # the system resource itself must be PATCHable
        assert(operation_allowed(headers, 'PATCH'))
        supported_targets = system['Boot']['BootSourceOverrideSupported']
        if boottarget not in supported_targets:
            # requested target isn't supported — list the valid options instead
            print('\tBootSourceOverrideTarget value "' + boottarget + '" is not supported. Valid values are:')
            for tgt in supported_targets:
                print('\t\t' + tgt)
        else:
            # minimal PATCH payload selecting the requested boot target
            boot = {'Boot': {'BootSourceOverrideTarget': boottarget}}
            print('PATCH ' + json.dumps(boot) + ' to ' + memberuri)
            status, headers, response = rest_patch(host, memberuri, None, boot, iLO_loginname, iLO_password)
            print('PATCH response = ' + str(status))
            print_extended_error(response)
        # point made...quit
        break
# noinspection PyPep8Naming
def ex7_find_iLO_MAC_address(host, iLO_loginname, iLO_password):
    """Print the model, name, MAC address and state of every iLO manager NIC."""
    print("EXAMPLE 7: Find iLO's MAC Addresses")
    # walk the managers collection at /rest/v1/Managers
    for status, headers, manager, memberuri in collection(host, '/rest/v1/Managers', None, iLO_loginname, iLO_password):
        # accept both schema versions — 1.0 is expected to land with minimal changes
        assert(get_type(manager) == 'Manager.0' or get_type(manager) == 'Manager.1')
        # each manager links to its own collection of ethernet NICs
        for status, headers, nic, memberuri in collection(host, manager['links']['EthernetNICs']['href'], None, iLO_loginname, iLO_password):
            assert(get_type(nic) == 'EthernetNetworkInterface.0' or get_type(nic) == 'EthernetNetworkInterface.1')
            if 'MacAddress' in nic:
                print('\t' + manager['Model'] + ' ' + nic['Name'] + ' = ' + nic['MacAddress'] + '\t(' + nic['Status']['State'] + ')')
            else:
                print('\tNIC resource does not contain "MacAddress" property')
# noinspection PyPep8Naming
def ex8_add_iLO_user_account(host, iLO_loginname, iLO_password, new_iLO_loginname, new_iLO_username, new_iLO_password, irc=False, cfg=False, vm=False, usercfg=False, vpr=False):
    """Create an iLO user account.

    new_iLO_loginname: the login identity for the new account.
    new_iLO_username:  the friendly (full) name of the user.
    irc/cfg/vm/usercfg/vpr: privilege flags (remote console, iLO config,
    virtual media, user config, virtual power & reset); default is login-only.
    """
    print('EXAMPLE 8: Create an iLO User Account')
    # get the URI of the Accounts collection (not standardized)
    status, headers, obj = rest_get(host, '/rest/v1/AccountService', None, iLO_loginname, iLO_password)
    assert(status == 200)
    account_collection = obj['links']['Accounts']['href']
    # build up a new account object to create
    # iLO has two user account properties:
    #     Login name = the string used as the user identity to log in - we use this for 'UserName'
    #     User name = the friendly (or full) name of the user
    # Potentially easy to reverse, so be careful - use the iLO account login name as 'UserName' in the API
    # (fix: the password placeholder left by anonymization is restored to the
    # new_iLO_password parameter)
    user = {'UserName': new_iLO_loginname, 'Password': new_iLO_password, 'Oem': {}}
    # Supply the full name as LoginName
    user['Oem']['Hp'] = {}
    user['Oem']['Hp']['LoginName'] = new_iLO_username  # again this is tricky: LoginName gets the friendly user name
    # plug in the requested privileges, by default you get LoginPriv and nothing else
    user['Oem']['Hp']['Privileges'] = {}
    user['Oem']['Hp']['Privileges']['RemoteConsolePriv'] = irc
    user['Oem']['Hp']['Privileges']['iLOConfigPriv'] = cfg
    user['Oem']['Hp']['Privileges']['VirtualMediaPriv'] = vm
    user['Oem']['Hp']['Privileges']['UserConfigPriv'] = usercfg
    user['Oem']['Hp']['Privileges']['VirtualPowerAndResetPriv'] = vpr
    # create the account
    print('POST ' + json.dumps(user) + ' to ' + account_collection)
    status, headers, response = rest_post(host, account_collection, None, user, iLO_loginname, iLO_password)
    print('POST response = ' + str(status))
    print_extended_error(response)
    if status == 201:
        # this is the new account URI
        new_account_uri = headers['location']  # HTTP headers are not case sensitive
        print('Account ' + new_account_uri + ' created')
        # get the new account resource
        # it is possible that a future version of iLO will simply return the new account resource in the create response
        status, headers, acct = rest_get(host, urlparse(new_account_uri).path, None, iLO_loginname, iLO_password)
        assert(status == 200)
        #print('Account info: ' + json.dumps(acct, indent=4))
        # demonstration of how to remove the account using the Location header
        #status, headers, response = rest_delete(host, urlparse(new_account_uri).path, None, iLO_loginname, iLO_password)
        #assert(status == 200)
        #print('Account ' + new_account_uri + ' removed')
# noinspection PyPep8Naming
def ex9_modify_iLO_user_account(host, iLO_loginname, iLO_password, iLO_login_name_to_modify, new_loginname=None, new_username=None, new_password=<PASSWORD>, irc=None, cfg=None, vm=None, | |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module implementing RNN Cells that used to be in core.
@@EmbeddingWrapper
@@InputProjectionWrapper
@@OutputProjectionWrapper
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
# pylint: disable=protected-access,invalid-name
RNNCell = rnn_cell_impl.RNNCell
_like_rnncell = rnn_cell_impl._like_rnncell
_WEIGHTS_VARIABLE_NAME = rnn_cell_impl._WEIGHTS_VARIABLE_NAME
_BIAS_VARIABLE_NAME = rnn_cell_impl._BIAS_VARIABLE_NAME
# pylint: enable=protected-access,invalid-name
class _Linear(object):
  """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.

  Weights (and the optional bias) are created once in __init__ under the
  current variable scope; __call__ then applies them to new inputs.

  Args:
    args: a 2D Tensor or a list of 2D, batch, n, Tensors.
    output_size: int, second dimension of weight variable.
    dtype: data type for variables.
    build_bias: boolean, whether to build a bias variable.
    bias_initializer: starting value to initialize the bias
      (default is all zeros).
    kernel_initializer: starting value to initialize the weight.

  Raises:
    ValueError: if inputs_shape is wrong.
  """

  def __init__(self,
               args,
               output_size,
               build_bias,
               bias_initializer=None,
               kernel_initializer=None):
    self._build_bias = build_bias

    if args is None or (nest.is_sequence(args) and not args):
      raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
      # A single tensor: normalize to a list, but remember the caller's shape
      # so __call__ can accept the same single-tensor form.
      args = [args]
      self._is_sequence = False
    else:
      self._is_sequence = True

    # Calculate the total size of arguments on dimension 1.
    # All args are concatenated along dim 1, so dim 1 must be statically known.
    total_arg_size = 0
    shapes = [a.get_shape() for a in args]
    for shape in shapes:
      if shape.ndims != 2:
        raise ValueError("linear is expecting 2D arguments: %s" % shapes)
      if shape[1].value is None:
        raise ValueError("linear expects shape[1] to be provided for shape %s, "
                         "but saw %s" % (shape, shape[1]))
      else:
        total_arg_size += shape[1].value

    # Variables adopt the dtype of the (first) input tensor.
    dtype = [a.dtype for a in args][0]

    scope = vs.get_variable_scope()
    with vs.variable_scope(scope) as outer_scope:
      self._weights = vs.get_variable(
          _WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size],
          dtype=dtype,
          initializer=kernel_initializer)
      if build_bias:
        with vs.variable_scope(outer_scope) as inner_scope:
          # Biases must not be partitioned, whatever the outer scope does.
          inner_scope.set_partitioner(None)
          if bias_initializer is None:
            bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype)
          self._biases = vs.get_variable(
              _BIAS_VARIABLE_NAME, [output_size],
              dtype=dtype,
              initializer=bias_initializer)

  def __call__(self, args):
    # Apply the stored weights (and bias) to args, which must match the
    # single-tensor/list form given to __init__.
    if not self._is_sequence:
      args = [args]

    if len(args) == 1:
      res = math_ops.matmul(args[0], self._weights)
    else:
      # Explicitly creating a one for a minor performance improvement.
      one = constant_op.constant(1, dtype=dtypes.int32)
      res = math_ops.matmul(array_ops.concat(args, one), self._weights)
    if self._build_bias:
      res = nn_ops.bias_add(res, self._biases)
    return res
# TODO(xpan): Remove this function in a follow up.
def _linear(args,
            output_size,
            bias,
            bias_initializer=None,
            kernel_initializer=None):
  """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.

  Legacy functional form of the _Linear class above: creates (or reuses, under
  variable-scope reuse) the weight/bias variables and applies them in one call.

  Args:
    args: a 2D Tensor or a list of 2D, batch, n, Tensors.
    output_size: int, second dimension of W[i].
    bias: boolean, whether to add a bias term or not.
    bias_initializer: starting value to initialize the bias
      (default is all zeros).
    kernel_initializer: starting value to initialize the weight.

  Returns:
    A 2D Tensor with shape `[batch, output_size]` equal to
    sum_i(args[i] * W[i]), where W[i]s are newly created matrices.

  Raises:
    ValueError: if some of the arguments has unspecified or wrong shape.
  """
  if args is None or (nest.is_sequence(args) and not args):
    raise ValueError("`args` must be specified")
  if not nest.is_sequence(args):
    args = [args]

  # Calculate the total size of arguments on dimension 1.
  # All args are concatenated along dim 1, so dim 1 must be statically known.
  total_arg_size = 0
  shapes = [a.get_shape() for a in args]
  for shape in shapes:
    if shape.ndims != 2:
      raise ValueError("linear is expecting 2D arguments: %s" % shapes)
    if shape[1].value is None:
      raise ValueError("linear expects shape[1] to be provided for shape %s, "
                       "but saw %s" % (shape, shape[1]))
    else:
      total_arg_size += shape[1].value

  # Variables adopt the dtype of the (first) input tensor.
  dtype = [a.dtype for a in args][0]

  # Now the computation.
  scope = vs.get_variable_scope()
  with vs.variable_scope(scope) as outer_scope:
    weights = vs.get_variable(
        _WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size],
        dtype=dtype,
        initializer=kernel_initializer)
    if len(args) == 1:
      res = math_ops.matmul(args[0], weights)
    else:
      res = math_ops.matmul(array_ops.concat(args, 1), weights)
    if not bias:
      return res
    with vs.variable_scope(outer_scope) as inner_scope:
      # Biases must not be partitioned, whatever the outer scope does.
      inner_scope.set_partitioner(None)
      if bias_initializer is None:
        bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype)
      biases = vs.get_variable(
          _BIAS_VARIABLE_NAME, [output_size],
          dtype=dtype,
          initializer=bias_initializer)
    return nn_ops.bias_add(res, biases)
class EmbeddingWrapper(RNNCell):
  """Operator adding input embedding to the given cell.

  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your inputs in time,
  do the embedding on this batch-concatenated sequence, then split it and
  feed into your RNN.
  """

  def __init__(self,
               cell,
               embedding_classes,
               embedding_size,
               initializer=None,
               reuse=None):
    """Create a cell with an added input embedding.

    Args:
      cell: an RNNCell, an embedding will be put before its inputs.
      embedding_classes: integer, how many symbols will be embedded.
      embedding_size: integer, the size of the vectors we embed into.
      initializer: an initializer to use when creating the embedding;
        if None, the initializer from variable scope or a default one is used.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope.  If not `True`, and the existing scope already
        has the given variables, an error is raised.

    Raises:
      TypeError: if cell is not an RNNCell.
      ValueError: if embedding_classes is not positive.
    """
    super(EmbeddingWrapper, self).__init__(_reuse=reuse)
    if not _like_rnncell(cell):
      raise TypeError("The parameter cell is not RNNCell.")
    if embedding_classes <= 0 or embedding_size <= 0:
      raise ValueError("Both embedding_classes and embedding_size must be > 0: "
                       "%d, %d." % (embedding_classes, embedding_size))
    self._cell = cell
    self._embedding_classes = embedding_classes
    self._embedding_size = embedding_size
    self._initializer = initializer

  @property
  def state_size(self):
    # Delegates to the wrapped cell; the embedding adds no state.
    return self._cell.state_size

  @property
  def output_size(self):
    # Delegates to the wrapped cell; the embedding only changes inputs.
    return self._cell.output_size

  def zero_state(self, batch_size, dtype):
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      return self._cell.zero_state(batch_size, dtype)

  def call(self, inputs, state):
    """Run the cell on embedded inputs."""
    with ops.device("/cpu:0"):
      # Pick the first available initializer: the one supplied at
      # construction, then the variable scope's, then a default.
      initializer = self._initializer
      if not initializer:
        initializer = vs.get_variable_scope().initializer
      if not initializer:
        # Default initializer for embeddings should have variance=1.
        sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
        initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)
      state_dtype = state[0].dtype if isinstance(state, tuple) else state.dtype
      embedding = vs.get_variable(
          "embedding", [self._embedding_classes, self._embedding_size],
          initializer=initializer,
          dtype=state_dtype)
      flat_ids = array_ops.reshape(inputs, [-1])
      embedded = embedding_ops.embedding_lookup(embedding, flat_ids)
    return self._cell(embedded, state)
class InputProjectionWrapper(RNNCell):
  """Operator adding an input projection to the given cell.

  Note: in many cases it may be more efficient to not use this wrapper,
  but instead concatenate the whole sequence of your inputs in time,
  do the projection on this batch-concatenated sequence, then split it.
  """

  def __init__(self,
               cell,
               num_proj,
               activation=None,
               input_size=None,
               reuse=None):
    """Create a cell with input projection.

    Args:
      cell: an RNNCell, a projection of inputs is added before it.
      num_proj: Python integer.  The dimension to project to.
      activation: (optional) an optional activation function.
      input_size: Deprecated and unused.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope.  If not `True`, and the existing scope already
        has the given variables, an error is raised.

    Raises:
      TypeError: if cell is not an RNNCell.
    """
    super(InputProjectionWrapper, self).__init__(_reuse=reuse)
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    if not _like_rnncell(cell):
      raise TypeError("The parameter cell is not RNNCell.")
    self._cell = cell
    self._num_proj = num_proj
    self._activation = activation
    # The projection layer is built lazily, on the first call.
    self._linear = None

  @property
  def state_size(self):
    # Delegates to the wrapped cell; the projection adds no state.
    return self._cell.state_size

  @property
  def output_size(self):
    # Delegates to the wrapped cell; the projection only changes inputs.
    return self._cell.output_size

  def zero_state(self, batch_size, dtype):
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      return self._cell.zero_state(batch_size, dtype)

  def call(self, inputs, state):
    """Run the input projection and then the cell."""
    # Default scope: "InputProjectionWrapper"
    if self._linear is None:
      self._linear = _Linear(inputs, self._num_proj, True)
    proj = self._linear(inputs)
    if self._activation:
      proj = self._activation(proj)
    return self._cell(proj, state)
class OutputProjectionWrapper(RNNCell):
"""Operator adding an output projection to the given cell.
Note: in many cases it may be more efficient to not use this wrapper,
but instead concatenate the whole sequence of your outputs in time,
do the projection on this batch-concatenated sequence, then split it
if needed or directly feed into a softmax.
"""
def __init__(self, cell, output_size, activation=None, reuse=None):
"""Create a cell with output projection.
Args:
cell: an RNNCell, a projection to output_size is added to it.
output_size: integer, the size of the output after projection.
activation: (optional) an optional activation | |
# repo: r1mikey/research-unix-v7
#!/usr/bin/env python3
from collections import deque
import os
import time
import sys
import struct
"""
struct filsys {
u16 s_isize; /* size in blocks of i-list */
v7_daddr_t s_fsize; /* size in blocks of entire volume */
i16 s_nfree; /* number of addresses in s_free */
v7_daddr_t s_free[NICFREE];/* free block list */
i16 s_ninode; /* number of i-nodes in s_inode */
v7_ino_t s_inode[NICINOD];/* free i-node list */
char s_flock; /* lock during free list manipulation */
char s_ilock; /* lock during i-list manipulation */
char s_fmod; /* super block modified flag */
char s_ronly; /* mounted read-only flag */
v7_time_t s_time; /* last super block update */
/* remainder not maintained by this version of the system */
v7_daddr_t s_tfree; /* total free blocks*/
v7_ino_t s_tinode; /* total free inodes */
i16 s_m; /* interleave factor */
i16 s_n; /* " " */
char s_fname[6]; /* file system name */
char s_fpack[6]; /* file system pack name */
};
struct dinode
{
u16 di_mode; /* 0 2 2 mode and type of file */
i16 di_nlink; /* 2 2 4 number of links to file */
i16 di_uid; /* 4 2 6 owner's user id */
i16 di_gid; /* 6 2 8 owner's group id */
v7_off_t di_size; /* 8 4 12 number of bytes in file */
char di_addr[40]; /* 12 40 52 disk block addresses (13 * 3 byte addresses) - last three are indirect, double-indirect, triple indirect */
v7_time_t di_atime; /* 52 4 56 time last accessed */
v7_time_t di_mtime; /* 56 4 60 time last modified */
v7_time_t di_ctime; /* 60 4 64 time created */
};
#define INOPB 8 /* 8 inodes per block */
#define NADDR 13 /* number of addressed blocks in a inode: 0..NADDR-4 are direct, -3 is indirect, -2 is double indirect, -1 is triple indorect */
#define MAXFN 500
int f_n = MAXFN;
int f_m = 3;
"""
CONSTRUCTION_TS = int(time.time())  # single timestamp used for all fs times
BSIZE = 512 # size of secondary block (bytes)
NICINOD = 100 # number of superblock inodes
NICFREE = 50 # number of superblock free blocks
MAX_FN = 500 # maximum free-list interleave cycle length (MAXFN in V7 mkfs)
DEFAULT_FN = MAX_FN # default s_n (interleave cycle length)
DEFAULT_FM = 3 # default s_m (interleave factor)
NIPB = 8 # inodes per disk block (INOPB)
NADDR = 13 # block addresses in an inode; last 3 are (multi-)indirect
NUM_DIRECT_ADDR = NADDR - 3 # addresses 0..9 are direct
NINDIR = int(BSIZE / 4) # block addresses held by one indirect block
BYTES_PER_DIRENT = 16 # size of one on-disk directory entry
IFMT = 0o0170000 # type of file
IFCHR = 0o0020000 # character special
IFDIR = 0o0040000 # directory
IFBLK = 0o0060000 # block special
IFREG = 0o0100000 # regular
ISUID = 0o04000 # set user id on execution
ISGID = 0o02000 # set group id on execution
class FilesystemSpec(object):
    """Description of a V7 filesystem image under construction.

    Tracks the image geometry (block/inode counts), hands out free blocks
    and inodes while the file tree is built, and serialises the superblock
    (``struct filsys``) and the chained free-block list into the image.
    """

    # Block 0 is the boot block; the superblock lives in block 1.
    SUPERBLOCK_BLOCK_NUMBER = 1

    def __init__(self, bootblock, total_blocks, total_inodes):
        self._bootblock = bootblock
        self._total_blocks = total_blocks
        self._total_inodes = total_inodes
        self._root = None
        self._allocated_inode = 0
        self._allocated_block = 0
        # BUG FIX: these two counters were previously swapped —
        # inodes was initialised from total_blocks and vice versa.
        self._available_inodes = self._total_inodes
        self._available_blocks = self._total_blocks
        # Superblock (struct filsys) fields, serialised by _store_self().
        self._fs_s_isize = int((self._total_inodes / NIPB) + 3) # size in blocks of i-list
        self._fs_s_fsize = self._total_blocks # size in blocks of entire volume
        self._fs_s_nfree = 0 # number of addresses in s_free
        self._fs_s_free = None # free block list
        self._fs_s_ninode = 0 # number of i-nodes in s_inode
        self._fs_s_inode = None # free i-node list
        self._fs_s_flock = '\0' # lock during free list manipulation
        self._fs_s_ilock = '\0' # lock during i-list manipulation
        self._fs_s_fmod = '\0' # super block modified flag
        self._fs_s_ronly = '\0' # mounted read-only flag
        self._fs_s_time = 0 # last super block update
        # remainder not maintained by this version of the system
        self._fs_s_tfree = 0 # total free blocks
        self._fs_s_tinode = 0 # total free inodes
        self._fs_s_m = DEFAULT_FM # interleave factor
        self._fs_s_n = DEFAULT_FN # interleave cycle length
        self._fs_s_fname = "" # file system name
        self._fs_s_fpack = "" # file system pack name
        self.free_blocks = deque()
        # Reversed so that pop() hands out inode numbers in ascending order.
        self.free_inodes = [x for x in range(1, self._total_inodes + 1)]
        self.free_inodes.reverse()
        print("m/n = {} {}".format(self._fs_s_m, self._fs_s_n))
        if self._fs_s_isize >= self._fs_s_fsize:
            raise RuntimeError("{}/{}: bad ratio".format(self._fs_s_fsize, self._fs_s_isize - 2))
        # Inode 1 is the traditional (unreferenced) bad-block file.
        self._bad_block_table_inode = IndexNode(self, IFREG, 0, 0, 0)
        self._bad_block_table_inode.set_source_file(None)
        assert(self._bad_block_table_inode._inum == 1)

    def set_root(self, root):
        """Record the root directory node; the fs root must be inode 2."""
        assert(root._inode._inum == 2)
        self._root = root

    # the free list ends up rooted at inode 1 as an unreferenced regular file (from what I can see)
    # inode 2 is the fs root
    # everything else is allocated as needed
    # blocks start at 1 (superblock), followed by the free list head and the inodes, then files/indirects and free list blocks
    def build_freelist(self):
        """Populate ``free_blocks`` using the V7 m/n interleave pattern.

        Mirrors the free-list construction in the original mkfs: blocks are
        handed out in an interleaved order (factor m over a cycle of n) so
        that sequential reads hit blocks spaced for rotational latency.
        """
        flg = [0 for _ in range(0, self._fs_s_n)]
        adr = [0 for _ in range(0, self._fs_s_n)]
        i = 0
        # Build the interleave table: adr[j] is the (1-based) offset of the
        # j-th block within each cycle of n.
        for j in range(0, self._fs_s_n):
            while flg[i]:
                i = int((i + 1) % self._fs_s_n)
            adr[j] = i + 1
            flg[i] = flg[i] + 1
            i = int((i + self._fs_s_m) % self._fs_s_n)
        # Round d up past fsize-1 to a multiple of n so whole cycles are walked.
        d = self._fs_s_fsize - 1
        if d % self._fs_s_n == 0:
            d += 1
        while d % self._fs_s_n:
            d += 1
        # Walk backwards one cycle at a time, keeping only in-range blocks.
        while d:
            for i in range(0, self._fs_s_n):
                f = d - adr[i]
                if f < self._fs_s_fsize and f >= self._fs_s_isize:
                    self.free_blocks.append(f)
            d -= self._fs_s_n

    def claim_inode(self):
        """Allocate and return the next free inode number."""
        return self.free_inodes.pop()

    def claim_block(self):
        """Allocate and return the next free block number."""
        return self.free_blocks.pop()

    def _store_freelist(self, fh):
        """Write the chained free-block list to the image file.

        The first chunk of NICFREE addresses stays in the superblock
        (s_free); every further chunk is written into the block named by
        the previous chunk's first entry, forming the on-disk chain.
        """
        pad = '\0' * (BSIZE - (NICFREE * 4))
        sfmt = "<{}I{}s".format(NICFREE, (BSIZE - (NICFREE * 4)))
        blocks = [bno for bno in self.free_blocks]
        blocks.reverse()
        remaining = len(blocks)
        pos = 0
        while remaining:
            n = min(remaining, NICFREE)
            remaining -= n
            chunk = blocks[pos:pos + n]
            pos += n
            if n != NICFREE:
                # Last (short) chunk: pad with zero addresses.
                chunk += [0 for _ in range(0, (NICFREE - n))]
            assert(len(chunk) == NICFREE)
            if not self._fs_s_free:
                # First chunk lives in the superblock, not on disk.
                self._fs_s_free = chunk[:]
                self._fs_s_nfree = n
                bno = self._fs_s_free[0]
                continue
            assert((len(chunk) * 4) + len(pad) == BSIZE)
            args = chunk + [pad.encode("utf-8")]
            data = struct.pack(sfmt, *args)
            assert(len(data) == BSIZE)
            fh.seek(bno * BSIZE)
            written = fh.write(data)
            if written != BSIZE:
                raise RuntimeError("Failed to write free-space block - only wrote {} of {} bytes".format(written, BSIZE))
            bno = chunk[0]

    def _store_self(self, fh):
        """Serialise the superblock (struct filsys) into block 1."""
        self._fs_s_tfree = len(self.free_blocks)
        self._fs_s_tinode = len(self.free_inodes)
        self._fs_s_time = int(time.time())
        # Cache up to NICINOD free inode numbers in the superblock.
        self._fs_s_inode = self.free_inodes[0:NICINOD]
        self._fs_s_ninode = len(self._fs_s_inode)
        if len(self._fs_s_inode) != NICINOD:
            self._fs_s_inode += [0 for _ in range(0, (NICINOD - len(self._fs_s_inode)))]
        assert(len(self._fs_s_inode) == NICINOD)
        assert(len(self._fs_s_free) == NICFREE)
        sfmt = "@Hih{}ih{}HcccciiHhh6s6s".format(NICFREE, NICINOD)
        args = [
            self._fs_s_isize,
            self._fs_s_fsize,
            self._fs_s_nfree,
        ] + self._fs_s_free + [
            self._fs_s_ninode,
        ] + self._fs_s_inode + [
            self._fs_s_flock.encode("utf-8"),
            self._fs_s_ilock.encode("utf-8"),
            self._fs_s_fmod.encode("utf-8"),
            self._fs_s_ronly.encode("utf-8"),
            self._fs_s_time,
            self._fs_s_tfree,
            self._fs_s_tinode,
            self._fs_s_m,
            self._fs_s_n,
            self._fs_s_fname.encode("utf-8"),
            self._fs_s_fpack.encode("utf-8"),
        ]
        data = struct.pack(sfmt, *args)
        EXPECTED_DATA_LENGTH = 446  # 440 if unpadded
        assert(len(data) == EXPECTED_DATA_LENGTH)
        pad = b'\0' * (BSIZE - EXPECTED_DATA_LENGTH)
        data += pad
        fh.seek(FilesystemSpec.SUPERBLOCK_BLOCK_NUMBER * BSIZE)
        written = fh.write(data)
        if written != BSIZE:
            raise RuntimeError("Failed to write superblock - only wrote {} of {} bytes".format(written, BSIZE))

    def store(self, fh):
        """Write the whole filesystem metadata to the image file `fh`.

        Order matters: inodes first, then the free list (which fixes
        s_free/s_nfree), then the superblock itself.
        """
        self._bad_block_table_inode.store(self, fh)
        self._root.store(self, fh)
        self._store_freelist(fh)
        self._store_self(fh)
class DirectoryEntry(object):
    """One on-disk directory slot: a 2-byte inode number plus a 14-byte name."""

    # Size in bytes of a serialised entry (struct direct).
    DIRECT_SIZE = 16

    def __init__(self, inum, name):
        # Inode number and (unencoded) entry name for this slot.
        self._inum = inum
        self._name = name

    def __lt__(self, other):
        # Entries sort alphabetically by name.
        return self._name < other._name

    def as_direct(self):
        """Serialise this entry into its 16-byte on-disk form."""
        packed = struct.pack("@H14s", self._inum, self._name.encode("utf-8"))
        assert(len(packed) == DirectoryEntry.DIRECT_SIZE)
        return packed
"""
/*
* Inode structure as it appears on
* a disk block.
*/
struct dinode
{
u16 di_mode; /* 0 2 2 mode and type of file */
i16 di_nlink; /* 2 2 4 number of links to file */
i16 di_uid; /* 4 2 6 owner's user id */
i16 di_gid; /* 6 2 8 owner's group id */
v7_off_t di_size; /* 8 4 12 number of bytes in file */
char di_addr[40]; /* 12 40 52 disk block addresses (13 * 3 byte addresses) */
v7_time_t di_atime; /* 52 4 56 time last accessed */
v7_time_t di_mtime; /* 56 4 60 time last modified */
v7_time_t di_ctime; /* 60 4 64 time created */
};
#define INOPB 8 /* 8 inodes per block */
#define IFMT 0170000 /* type of file */
#define IFCHR 0020000 /* character special */
#define IFDIR 0040000 /* directory */
#define IFBLK 0060000 /* block special */
#define IFREG 0100000 /* regular */
#define ISUID 04000 /* set user id on execution */
#define ISGID 02000 /* set group id on execution */
dev/mx1.c: ip->i_mode = 0666+IFCHR;
"""
#define itod(x) (v7_daddr_t)((((unsigned)x+15)>>3))
#define itoo(x) (int)((x+15)&07)
def itod(x):
    """Return the i-list disk-block offset holding inode number `x`."""
    return (x + 15) // 8
def itoo(x):
    """Return the slot of inode number `x` within its i-list block."""
    return (x + 15) % 8
#
# Pass 1: discover all nodes from the config
# Pass 2: allocate index nodes
# Pass 3: set children (now that we have inodes - sets up dirents as needed - maybe | |
available versions are:
- 2021-12-16
- 2021-01-21
- 2021-11-15
- 2021-12-06
"""
return AutomaticallyRetrievedGraph(
"RXNO", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
def OMP(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-05-06", **kwargs
) -> Graph:
    """Return the OMP graph, retrieving and caching it on first use.

    Parameters
    ----------
    directed = False
        Whether to load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
        Environment variable consulted for the cache directory.
    version = "2022-05-06"
        Version to retrieve. The available versions are:
        - 2022-06-03
        - 2021-10-01
        - 2021-12-03
        - 2022-01-07
        - 2022-02-08
        - 2022-03-04
        - 2022-04-11
        - 2022-05-06
    """
    retriever = AutomaticallyRetrievedGraph(
        "OMP", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def ERO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "no_version", **kwargs
) -> Graph:
    """Return the ERO graph, retrieving and caching it on first use.

    Parameters
    ----------
    directed = False
        Whether to load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
        Environment variable consulted for the cache directory.
    version = "no_version"
        Version to retrieve. The available versions are:
        - no_version
    """
    retriever = AutomaticallyRetrievedGraph(
        "ERO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def GNO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-08-13", **kwargs
) -> Graph:
    """Return the GNO graph, retrieving and caching it on first use.

    Parameters
    ----------
    directed = False
        Whether to load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
        Environment variable consulted for the cache directory.
    version = "2021-08-13"
        Version to retrieve. The available versions are:
        - 2022-02-23
        - 2021-08-13
    """
    retriever = AutomaticallyRetrievedGraph(
        "GNO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def XCO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "4.46", **kwargs
) -> Graph:
    """Return the XCO graph, retrieving and caching it on first use.

    Parameters
    ----------
    directed = False
        Whether to load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
        Environment variable consulted for the cache directory.
    version = "4.46"
        Version to retrieve. The available versions are:
        - 4.46
    """
    retriever = AutomaticallyRetrievedGraph(
        "XCO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def AMPHX(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-12-18", **kwargs
) -> Graph:
    """Return the AMPHX graph, retrieving and caching it on first use.

    Parameters
    ----------
    directed = False
        Whether to load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
        Environment variable consulted for the cache directory.
    version = "2020-12-18"
        Version to retrieve. The available versions are:
        - 2020-12-18
    """
    retriever = AutomaticallyRetrievedGraph(
        "AMPHX", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def EPIO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-05-28", **kwargs
) -> Graph:
    """Return the EPIO graph, retrieving and caching it on first use.

    Parameters
    ----------
    directed = False
        Whether to load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
        Environment variable consulted for the cache directory.
    version = "2021-05-28"
        Version to retrieve. The available versions are:
        - 2021-05-28
    """
    retriever = AutomaticallyRetrievedGraph(
        "EPIO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def CLYH(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-05-29", **kwargs
) -> Graph:
    """Return the CLYH graph, retrieving and caching it on first use.

    Parameters
    ----------
    directed = False
        Whether to load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
        Environment variable consulted for the cache directory.
    version = "2020-05-29"
        Version to retrieve. The available versions are:
        - 2020-05-29
    """
    retriever = AutomaticallyRetrievedGraph(
        "CLYH", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def OOSTT(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-01-08", **kwargs
) -> Graph:
    """Return the OOSTT graph, retrieving and caching it on first use.

    Parameters
    ----------
    directed = False
        Whether to load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
        Environment variable consulted for the cache directory.
    version = "2021-01-08"
        Version to retrieve. The available versions are:
        - 2021-01-08
    """
    retriever = AutomaticallyRetrievedGraph(
        "OOSTT", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def FYPO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2022-05-11", **kwargs
) -> Graph:
    """Return the FYPO graph, retrieving and caching it on first use.

    Parameters
    ----------
    directed = False
        Whether to load the graph as directed.
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range.
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges.
    cache_path = None
        Path to store graphs.
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`.
    cache_sys_var = "GRAPH_CACHE_DIR"
        Environment variable consulted for the cache directory.
    version = "2022-05-11"
        Version to retrieve. The available versions are:
        - 2022-05-16
        - 2021-10-05
        - 2021-11-08
        - 2021-11-18
        - 2021-12-07
        - 2022-01-18
        - 2022-01-27
        - 2022-04-22
        - 2022-04-28
        - 2022-05-11
    """
    retriever = AutomaticallyRetrievedGraph(
        "FYPO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
def NCRO(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "2015-12-10", **kwargs
) -> Graph:
| |
v S^2)
Defn: [v_0, sigma_2] --> [*, sigma_2]
sage: W.projection_map(1)
Simplicial set morphism:
From: Wedge: (S^2 v S^3 v S^2)
To: Quotient: (Wedge: (S^2 v S^3 v S^2)/Simplicial set with 3 non-degenerate simplices)
Defn: [*, sigma_2, sigma_2, sigma_3] --> [*, s_1 s_0 *, s_1 s_0 *, sigma_3]
Note that the codomain of the projection map is not identical
to the original ``S2``, but is instead a quotient of the wedge
which is isomorphic to ``S2``::
sage: S2.f_vector()
[1, 0, 1]
sage: W.projection_map(2).codomain().f_vector()
[1, 0, 1]
sage: (W.projection_map(2) * W.inclusion_map(2)).is_bijective()
True
TESTS::
sage: Z = SimplicialSet({e: (v,w)})
sage: X.wedge(Z)
Traceback (most recent call last):
...
ValueError: the simplicial sets must be pointed
"""
from .simplicial_set_constructions import WedgeOfSimplicialSets, \
WedgeOfSimplicialSets_finite
if all(space.is_finite() for space in [self] + list(others)):
return WedgeOfSimplicialSets_finite((self,) + others)
else:
return WedgeOfSimplicialSets((self,) + others)
def cone(self):
r"""
Return the (reduced) cone on this simplicial set.
If this simplicial set `X` is not pointed, construct the
ordinary cone: add a point `v` (which will become the base
point) and for each simplex `\sigma` in `X`, add both `\sigma`
and a simplex made up of `v` and `\sigma` (topologically, form
the join of `v` and `\sigma`).
If this simplicial set is pointed, then construct the reduced
cone: take the quotient of the unreduced cone by the 1-simplex
connecting the old base point to the new one.
In either case, as long as the simplicial set is finite, it
comes equipped in Sage with a map from it into the cone.
EXAMPLES::
sage: from sage.homology.simplicial_set import AbstractSimplex, SimplicialSet
sage: v = AbstractSimplex(0, name='v')
sage: e = AbstractSimplex(1, name='e')
sage: X = SimplicialSet({e: (v, v)})
sage: CX = X.cone() # unreduced cone, since X not pointed
sage: CX.nondegenerate_simplices()
[*, v, (v,*), e, (e,*)]
sage: CX.base_point()
*
`X` as a subset of the cone, and also the map from `X`, in the
unreduced case::
sage: CX.base_as_subset()
Simplicial set with 2 non-degenerate simplices
sage: CX.map_from_base()
Simplicial set morphism:
From: Simplicial set with 2 non-degenerate simplices
To: Cone of Simplicial set with 2 non-degenerate simplices
Defn: [v, e] --> [v, e]
In the reduced case, only the map from `X` is available::
sage: X = X.set_base_point(v)
sage: CX = X.cone() # reduced cone
sage: CX.nondegenerate_simplices()
[*, e, (e,*)]
sage: CX.map_from_base()
Simplicial set morphism:
From: Simplicial set with 2 non-degenerate simplices
To: Reduced cone of Simplicial set with 2 non-degenerate simplices
Defn: [v, e] --> [*, e]
"""
from .simplicial_set_constructions import \
ConeOfSimplicialSet, ConeOfSimplicialSet_finite, \
ReducedConeOfSimplicialSet, ReducedConeOfSimplicialSet_finite
if self.is_pointed():
if self.is_finite():
return ReducedConeOfSimplicialSet_finite(self)
else:
return ReducedConeOfSimplicialSet(self)
if self.is_finite():
return ConeOfSimplicialSet_finite(self)
else:
return ConeOfSimplicialSet(self)
def suspension(self, n=1):
"""
Return the (reduced) `n`-th suspension of this simplicial set.
INPUT:
- ``n`` (optional, default 1) -- integer, suspend this many
times.
If this simplicial set `X` is not pointed, return the
suspension: the quotient `CX/X`, where `CX` is the (ordinary,
unreduced) cone on `X`. If `X` is pointed, then use the
reduced cone instead, and so return the reduced suspension.
EXAMPLES::
sage: RP4 = simplicial_sets.RealProjectiveSpace(4)
sage: S1 = simplicial_sets.Sphere(1)
sage: SigmaRP4 = RP4.suspension()
sage: S1_smash_RP4 = S1.smash_product(RP4)
sage: SigmaRP4.homology() == S1_smash_RP4.homology()
True
The version of the suspension obtained by the smash product is
typically less efficient than the reduced suspension produced
here::
sage: SigmaRP4.f_vector()
[1, 0, 1, 1, 1, 1]
sage: S1_smash_RP4.f_vector()
[1, 1, 4, 6, 8, 5]
TESTS::
sage: RP4.suspension(-3)
Traceback (most recent call last):
...
ValueError: n must be non-negative
"""
from .simplicial_set_constructions import \
SuspensionOfSimplicialSet, SuspensionOfSimplicialSet_finite
if n < 0:
raise ValueError('n must be non-negative')
if n == 0:
return self
if self.is_finite():
Sigma = SuspensionOfSimplicialSet_finite(self)
else:
Sigma = SuspensionOfSimplicialSet(self)
if n == 1:
return Sigma
return Sigma.suspension(n-1)
def join(self, *others):
"""
The join of this simplicial set with ``others``.
Not implemented. See
https://ncatlab.org/nlab/show/join+of+simplicial+sets for a
few descriptions, for anyone interested in implementing
this. See also <NAME> and <NAME>, Joins for
(Augmented) Simplicial Sets, Jour. Pure Applied Algebra, 145
(2000) 37-44 :arxiv:`9904039`.
- ``others`` -- one or several simplicial sets
EXAMPLES::
sage: K = simplicial_sets.Simplex(2)
sage: K.join(K)
Traceback (most recent call last):
...
NotImplementedError: joins are not implemented for simplicial sets
"""
raise NotImplementedError('joins are not implemented for simplicial sets')
def reduce(self):
"""
Reduce this simplicial set.
That is, take the quotient by a spanning tree of the
1-skeleton, so that the resulting simplicial set has only one
vertex. This only makes sense if the simplicial set is
connected, so raise an error if not. If already reduced,
return itself.
EXAMPLES::
sage: K = simplicial_sets.Simplex(2)
sage: K.is_reduced()
False
sage: X = K.reduce()
sage: X.is_reduced()
True
``X`` is reduced, so calling ``reduce`` on it again
returns ``X`` itself::
sage: X is X.reduce()
True
sage: K is K.reduce()
False
Raise an error for disconnected simplicial sets::
sage: S0 = simplicial_sets.Sphere(0)
sage: S0.reduce()
Traceback (most recent call last):
...
ValueError: this simplicial set is not connected
"""
if self.is_reduced():
return self
if not self.is_connected():
raise ValueError("this simplicial set is not connected")
graph = self.graph()
spanning_tree = [e[2] for e in graph.min_spanning_tree()]
return self.quotient(spanning_tree)
def _Hom_(self, other, category=None):
"""
Return the set of simplicial maps between simplicial sets
``self`` and ``other``.
INPUT:
- ``other`` -- another simplicial set
- ``category`` -- optional, the category in which to compute
the maps. By default this is ``SimplicialSets``, and it must
be a subcategory of this or else an error is raised.
EXAMPLES::
sage: S3 = simplicial_sets.Sphere(3)
sage: S2 = simplicial_sets.Sphere(2)
sage: S3._Hom_(S2)
Set of Morphisms from S^3 to S^2 in Category of finite pointed simplicial sets
sage: Hom(S3, S2)
Set of Morphisms from S^3 to S^2 in Category of finite pointed simplicial sets
sage: K4 = simplicial_sets.Simplex(4)
sage: S3._Hom_(K4)
Set of Morphisms from S^3 to 4-simplex in Category of finite simplicial sets
"""
# Import this here to prevent circular imports.
from sage.homology.simplicial_set_morphism import SimplicialSetHomset
# Error-checking on the ``category`` argument is done when
# calling Hom(X,Y), so no need to do it again here.
if category is None:
if self.is_finite() and other.is_finite():
if self.is_pointed() and other.is_pointed():
category = SimplicialSets().Finite().Pointed()
else:
category = SimplicialSets().Finite()
else:
if self.is_pointed() and other.is_pointed():
category = SimplicialSets().Pointed()
else:
category = SimplicialSets()
return SimplicialSetHomset(self, other, category=category)
def rename_latex(self, s):
"""
Rename or set the LaTeX name for this simplicial set.
INPUT:
- ``s`` -- string, the LaTeX representation. Or ``s`` can be
``None``, in which case the LaTeX name is unset.
EXAMPLES::
sage: from sage.homology.simplicial_set import AbstractSimplex, SimplicialSet
sage: v = AbstractSimplex(0)
sage: X = SimplicialSet({v: None}, latex_name='*')
sage: latex(X)
*
sage: X.rename_latex('x_0')
sage: latex(X)
x_0
"""
self._latex_name = s
def _latex_(self):
r"""
LaTeX representation.
If ``latex_name`` is set when the simplicial set is defined,
or if :meth:`rename_latex` is used to set the LaTeX name, use
that. Otherwise, use its string representation.
EXAMPLES::
sage: from sage.homology.simplicial_set import AbstractSimplex, SimplicialSet
sage: v = AbstractSimplex(0)
sage: X = SimplicialSet({v: None}, latex_name='*')
sage: latex(X)
*
sage: X.rename_latex('y_0')
sage: latex(X)
y_0
sage: X.rename_latex(None)
sage: latex(X)
Simplicial set with 1 non-degenerate simplex
sage: X.rename('v')
sage: latex(X)
v
"""
if hasattr(self, '_latex_name') and self._latex_name is not None:
return self._latex_name
return str(self)
def _repr_(self):
"""
Print representation.
EXAMPLES::
sage: from sage.homology.simplicial_set import AbstractSimplex, SimplicialSet
sage: v = AbstractSimplex(0)
sage: w = AbstractSimplex(0)
sage: degen = v.apply_degeneracies(0)
sage: tau = AbstractSimplex(2)
sage: SimplicialSet({tau: (degen, degen, degen), w: None})
Simplicial set with 3 non-degenerate simplices
sage: SimplicialSet({w: None})
Simplicial set with 1 non-degenerate simplex
Test names and renaming::
sage: SimplicialSet({w: None}, name='pt')
pt
sage: K = SimplicialSet({w: None}, name='pt')
sage: K.rename('point')
sage: K
point
"""
num = len(self.nondegenerate_simplices())
if num == 1:
return "Simplicial set with 1 non-degenerate simplex"
return "Simplicial set with {} non-degenerate simplices".format(num)
class SimplicialSet_finite(SimplicialSet_arbitrary, GenericCellComplex):
r"""
A finite simplicial set.
A simplicial set `X` is a collection of sets `X_n`, the
*n-simplices*, indexed by the non-negative integers, together with
face maps `d_i` and degeneracy maps | |
import json
from django.core import mail
from .. import test
from ...acl.test import patch_user_acl
from ...users.test import create_test_user
from ..models import Thread, ThreadParticipant
from ..test import other_user_cant_use_private_threads
from .test_privatethreads import PrivateThreadsTestCase
class PrivateThreadPatchApiTestCase(PrivateThreadsTestCase):
    """Shared fixture for private-thread PATCH API tests: a thread posted
    by ``self.user``, its API URL, and a second user to act on."""

    def setUp(self):
        super().setUp()
        self.thread = test.post_thread(self.category, poster=self.user)
        self.api_link = self.thread.get_api_url()
        self.other_user = create_test_user("OtherUser", "<EMAIL>")

    def patch(self, api_link, ops):
        """Send a JSON-PATCH request with the given list of operations."""
        payload = json.dumps(ops)
        return self.client.patch(api_link, payload, content_type="application/json")
class PrivateThreadAddParticipantApiTests(PrivateThreadPatchApiTestCase):
    """Tests of the ``add participants`` PATCH op on the private-thread API."""

    def test_add_participant_not_owner(self):
        """non-owner can't add participant"""
        ThreadParticipant.objects.add_participants(self.thread, [self.user])
        response = self.patch(
            self.api_link,
            [{"op": "add", "path": "participants", "value": self.user.username}],
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(),
            {
                "id": self.thread.pk,
                "detail": [
                    "You have to be thread owner to add new participants to it."
                ],
            },
        )

    def test_add_empty_username(self):
        """patch validates username"""
        ThreadParticipant.objects.set_owner(self.thread, self.user)
        response = self.patch(
            self.api_link, [{"op": "add", "path": "participants", "value": ""}]
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(),
            {
                "id": self.thread.pk,
                "detail": ["You have to enter new participant's username."],
            },
        )

    def test_add_nonexistant_user(self):
        """can't add user that doesn't exist"""
        ThreadParticipant.objects.set_owner(self.thread, self.user)
        response = self.patch(
            self.api_link,
            [{"op": "add", "path": "participants", "value": "InvalidUser"}],
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(),
            {"id": self.thread.pk, "detail": ["No user with such name exists."]},
        )

    def test_add_already_participant(self):
        """can't add user that is already participant"""
        ThreadParticipant.objects.set_owner(self.thread, self.user)
        response = self.patch(
            self.api_link,
            [{"op": "add", "path": "participants", "value": self.user.username}],
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(),
            {
                "id": self.thread.pk,
                "detail": ["This user is already thread participant."],
            },
        )

    def test_add_blocking_user(self):
        """can't add user that is blocking the thread owner"""
        ThreadParticipant.objects.set_owner(self.thread, self.user)
        self.other_user.blocks.add(self.user)
        response = self.patch(
            self.api_link,
            [{"op": "add", "path": "participants", "value": self.other_user.username}],
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(),
            {"id": self.thread.pk, "detail": ["OtherUser is blocking you."]},
        )

    @patch_user_acl(other_user_cant_use_private_threads)
    def test_add_no_perm_user(self):
        """can't add user that has no permission to use private threads"""
        ThreadParticipant.objects.set_owner(self.thread, self.user)
        response = self.patch(
            self.api_link,
            [{"op": "add", "path": "participants", "value": self.other_user.username}],
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(),
            {
                "id": self.thread.pk,
                "detail": ["OtherUser can't participate in private threads."],
            },
        )

    @patch_user_acl({"max_private_thread_participants": 3})
    def test_add_too_many_users(self):
        """can't add user above the participants limit"""
        ThreadParticipant.objects.set_owner(self.thread, self.user)
        # Fill the thread to the patched limit of 3 participants.
        for i in range(3):
            # NOTE(review): the email template appears to have been anonymised
            # and lost its "%s" placeholder - verify against VCS.
            user = create_test_user("User%s" % i, "<EMAIL>" % i)
            ThreadParticipant.objects.add_participants(self.thread, [user])
        response = self.patch(
            self.api_link,
            [{"op": "add", "path": "participants", "value": self.other_user.username}],
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(),
            {
                "id": self.thread.pk,
                "detail": ["You can't add any more new users to this thread."],
            },
        )

    def test_add_user_closed_thread(self):
        """adding user to closed thread fails for non-moderator"""
        ThreadParticipant.objects.set_owner(self.thread, self.user)
        self.thread.is_closed = True
        self.thread.save()
        response = self.patch(
            self.api_link,
            [{"op": "add", "path": "participants", "value": self.other_user.username}],
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(),
            {
                "id": self.thread.pk,
                "detail": ["Only moderators can add participants to closed threads."],
            },
        )

    def test_add_user(self):
        """
        adding user to thread add user to thread as participant,
        sets event and emails them
        """
        ThreadParticipant.objects.set_owner(self.thread, self.user)
        self.patch(
            self.api_link,
            [{"op": "add", "path": "participants", "value": self.other_user.username}],
        )
        # event was set on thread
        event = self.thread.post_set.order_by("id").last()
        self.assertTrue(event.is_event)
        # was assertTrue(event.event_type, "..."), which never compared the
        # values (the second argument is assertTrue's failure message)
        self.assertEqual(event.event_type, "added_participant")
        # notification about new private thread was sent to other user
        self.assertEqual(len(mail.outbox), 1)
        email = mail.outbox[-1]
        self.assertIn(self.user.username, email.subject)
        self.assertIn(self.thread.title, email.subject)

    @patch_user_acl({"can_moderate_private_threads": True})
    def test_add_user_to_other_user_thread_moderator(self):
        """moderators can add users to other users threads"""
        ThreadParticipant.objects.set_owner(self.thread, self.other_user)
        self.thread.has_reported_posts = True
        self.thread.save()
        self.patch(
            self.api_link,
            [{"op": "add", "path": "participants", "value": self.user.username}],
        )
        # event was set on thread
        event = self.thread.post_set.order_by("id").last()
        self.assertTrue(event.is_event)
        self.assertEqual(event.event_type, "entered_thread")
        # notification about new private thread wasn't send because we invited ourselves
        self.assertEqual(len(mail.outbox), 0)

    @patch_user_acl({"can_moderate_private_threads": True})
    def test_add_user_to_closed_moderator(self):
        """moderators can add users to closed threads"""
        ThreadParticipant.objects.set_owner(self.thread, self.user)
        self.thread.is_closed = True
        self.thread.save()
        self.patch(
            self.api_link,
            [{"op": "add", "path": "participants", "value": self.other_user.username}],
        )
        # event was set on thread
        event = self.thread.post_set.order_by("id").last()
        self.assertTrue(event.is_event)
        self.assertEqual(event.event_type, "added_participant")
        # notification about new private thread was sent to other user
        self.assertEqual(len(mail.outbox), 1)
        email = mail.outbox[-1]
        self.assertIn(self.user.username, email.subject)
        self.assertIn(self.thread.title, email.subject)
class PrivateThreadRemoveParticipantApiTests(PrivateThreadPatchApiTestCase):
    """Tests of the ``remove participants`` PATCH op on the private-thread API."""

    def test_remove_empty(self):
        """api handles empty user id"""
        ThreadParticipant.objects.set_owner(self.thread, self.user)
        response = self.patch(
            self.api_link, [{"op": "remove", "path": "participants", "value": ""}]
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(),
            {"id": self.thread.pk, "detail": ["A valid integer is required."]},
        )

    def test_remove_invalid(self):
        """api validates user id type"""
        ThreadParticipant.objects.set_owner(self.thread, self.user)
        response = self.patch(
            self.api_link, [{"op": "remove", "path": "participants", "value": "string"}]
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(),
            {"id": self.thread.pk, "detail": ["A valid integer is required."]},
        )

    def test_remove_nonexistant(self):
        """removed user has to be participant"""
        ThreadParticipant.objects.set_owner(self.thread, self.user)
        response = self.patch(
            self.api_link,
            [{"op": "remove", "path": "participants", "value": self.other_user.pk}],
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(),
            {"id": self.thread.pk, "detail": ["Participant doesn't exist."]},
        )

    def test_remove_not_owner(self):
        """api validates if user trying to remove other user is an owner"""
        ThreadParticipant.objects.set_owner(self.thread, self.other_user)
        ThreadParticipant.objects.add_participants(self.thread, [self.user])
        response = self.patch(
            self.api_link,
            [{"op": "remove", "path": "participants", "value": self.other_user.pk}],
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(),
            {
                "id": self.thread.pk,
                "detail": [
                    "You have to be thread owner to remove participants from it."
                ],
            },
        )

    def test_owner_remove_user_closed_thread(self):
        """api disallows owner to remove other user from closed thread"""
        ThreadParticipant.objects.set_owner(self.thread, self.user)
        ThreadParticipant.objects.add_participants(self.thread, [self.other_user])
        self.thread.is_closed = True
        self.thread.save()
        response = self.patch(
            self.api_link,
            [{"op": "remove", "path": "participants", "value": self.other_user.pk}],
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(),
            {
                "id": self.thread.pk,
                "detail": [
                    "Only moderators can remove participants from closed threads."
                ],
            },
        )

    def test_user_leave_thread(self):
        """api allows user to remove themself from thread"""
        ThreadParticipant.objects.set_owner(self.thread, self.other_user)
        ThreadParticipant.objects.add_participants(self.thread, [self.user])
        self.user.subscription_set.create(category=self.category, thread=self.thread)
        response = self.patch(
            self.api_link,
            [{"op": "remove", "path": "participants", "value": self.user.pk}],
        )
        self.assertEqual(response.status_code, 200)
        self.assertFalse(response.json()["deleted"])
        # thread still exists
        self.assertTrue(Thread.objects.get(pk=self.thread.pk))
        # leave event has valid type
        event = self.thread.post_set.order_by("id").last()
        self.assertTrue(event.is_event)
        # was assertTrue(event.event_type, "..."), which never compared the
        # values (the second argument is assertTrue's failure message)
        self.assertEqual(event.event_type, "participant_left")
        # valid users were flagged for sync
        self.user.refresh_from_db()
        self.assertTrue(self.user.sync_unread_private_threads)
        self.other_user.refresh_from_db()
        self.assertTrue(self.other_user.sync_unread_private_threads)
        # user was removed from participation
        self.assertEqual(self.thread.participants.count(), 1)
        self.assertEqual(self.thread.participants.filter(pk=self.user.pk).count(), 0)
        # thread was removed from user subscriptions
        self.assertEqual(self.user.subscription_set.count(), 0)

    def test_user_leave_closed_thread(self):
        """api allows user to remove themself from closed thread"""
        ThreadParticipant.objects.set_owner(self.thread, self.other_user)
        ThreadParticipant.objects.add_participants(self.thread, [self.user])
        self.thread.is_closed = True
        self.thread.save()
        response = self.patch(
            self.api_link,
            [{"op": "remove", "path": "participants", "value": self.user.pk}],
        )
        self.assertEqual(response.status_code, 200)
        self.assertFalse(response.json()["deleted"])
        # thread still exists
        self.assertTrue(Thread.objects.get(pk=self.thread.pk))
        # leave event has valid type
        event = self.thread.post_set.order_by("id").last()
        self.assertTrue(event.is_event)
        self.assertEqual(event.event_type, "participant_left")
        # valid users were flagged for sync
        self.user.refresh_from_db()
        self.assertTrue(self.user.sync_unread_private_threads)
        self.other_user.refresh_from_db()
        self.assertTrue(self.other_user.sync_unread_private_threads)
        # user was removed from participation
        self.assertEqual(self.thread.participants.count(), 1)
        self.assertEqual(self.thread.participants.filter(pk=self.user.pk).count(), 0)

    @patch_user_acl({"can_moderate_private_threads": True})
    def test_moderator_remove_user(self):
        """api allows moderator to remove other user"""
        removed_user = create_test_user("RemovedUser", "<EMAIL>")
        ThreadParticipant.objects.set_owner(self.thread, self.other_user)
        ThreadParticipant.objects.add_participants(
            self.thread, [self.user, removed_user]
        )
        response = self.patch(
            self.api_link,
            [{"op": "remove", "path": "participants", "value": removed_user.pk}],
        )
        self.assertEqual(response.status_code, 200)
        self.assertFalse(response.json()["deleted"])
        # thread still exists
        self.assertTrue(Thread.objects.get(pk=self.thread.pk))
        # leave event has valid type
        event = self.thread.post_set.order_by("id").last()
        self.assertTrue(event.is_event)
        self.assertEqual(event.event_type, "participant_removed")
        # valid users were flagged for sync
        self.user.refresh_from_db()
        self.assertTrue(self.user.sync_unread_private_threads)
        self.other_user.refresh_from_db()
        self.assertTrue(self.other_user.sync_unread_private_threads)
        removed_user.refresh_from_db()
        self.assertTrue(removed_user.sync_unread_private_threads)
        # user was removed from participation
        self.assertEqual(self.thread.participants.count(), 2)
        self.assertEqual(self.thread.participants.filter(pk=removed_user.pk).count(), 0)

    def test_owner_remove_user(self):
        """api allows owner to remove other user"""
        ThreadParticipant.objects.set_owner(self.thread, self.user)
        ThreadParticipant.objects.add_participants(self.thread, [self.other_user])
        response = self.patch(
            self.api_link,
            [{"op": "remove", "path": "participants", "value": self.other_user.pk}],
        )
        self.assertEqual(response.status_code, 200)
        self.assertFalse(response.json()["deleted"])
        # thread still exists
        self.assertTrue(Thread.objects.get(pk=self.thread.pk))
        # leave event has valid type
        event = self.thread.post_set.order_by("id").last()
        self.assertTrue(event.is_event)
        self.assertEqual(event.event_type, "participant_removed")
        # valid users were flagged for sync
        self.user.refresh_from_db()
        self.assertTrue(self.user.sync_unread_private_threads)
        self.other_user.refresh_from_db()
        self.assertTrue(self.other_user.sync_unread_private_threads)
        # user was removed from participation
        self.assertEqual(self.thread.participants.count(), 1)
        self.assertEqual(
            self.thread.participants.filter(pk=self.other_user.pk).count(), 0
        )

    def test_owner_leave_thread(self):
        """api allows owner to remove himself from thread, causing thread to close"""
        ThreadParticipant.objects.set_owner(self.thread, self.user)
        ThreadParticipant.objects.add_participants(self.thread, [self.other_user])
        response = self.patch(
            self.api_link,
            [{"op": "remove", "path": "participants", "value": self.user.pk}],
        )
        self.assertEqual(response.status_code, 200)
        self.assertFalse(response.json()["deleted"])
        # thread still exists and is closed
        self.assertTrue(Thread.objects.get(pk=self.thread.pk).is_closed)
        # leave event has valid type
        event = self.thread.post_set.order_by("id").last()
        self.assertTrue(event.is_event)
        self.assertEqual(event.event_type, "owner_left")
        # valid users were flagged for sync
        self.user.refresh_from_db()
        self.assertTrue(self.user.sync_unread_private_threads)
        self.other_user.refresh_from_db()
        self.assertTrue(self.other_user.sync_unread_private_threads)
        # user was removed from participation
        self.assertEqual(self.thread.participants.count(), 1)
        self.assertEqual(self.thread.participants.filter(pk=self.user.pk).count(), 0)

    def test_last_user_leave_thread(self):
        """api allows last user leave thread, causing thread to delete"""
        ThreadParticipant.objects.set_owner(self.thread, self.user)
        response = self.patch(
            self.api_link,
            [{"op": "remove", "path": "participants", "value": self.user.pk}],
        )
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.json()["deleted"])
        # thread is gone
        with self.assertRaises(Thread.DoesNotExist):
            Thread.objects.get(pk=self.thread.pk)
        # valid users were flagged for sync
        self.user.refresh_from_db()
        self.assertTrue(self.user.sync_unread_private_threads)
class PrivateThreadTakeOverApiTests(PrivateThreadPatchApiTestCase):
    def test_empty_user_id(self):
        """api handles empty user id"""
        ThreadParticipant.objects.set_owner(self.thread, self.user)
        # An empty string fails the serializer's integer validation.
        response = self.patch(
            self.api_link, [{"op": "replace", "path": "owner", "value": ""}]
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(),
            {"id": self.thread.pk, "detail": ["A valid integer is required."]},
        )
    def test_invalid_user_id(self):
        """api handles invalid user id"""
        ThreadParticipant.objects.set_owner(self.thread, self.user)
        # A non-numeric string fails the serializer's integer validation.
        response = self.patch(
            self.api_link, [{"op": "replace", "path": "owner", "value": "dsadsa"}]
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(),
            {"id": self.thread.pk, "detail": ["A valid integer is required."]},
        )
    def test_nonexistant_user_id(self):
        """api handles nonexistent user id"""
        ThreadParticipant.objects.set_owner(self.thread, self.user)
        # other_user is a real user but not a participant of this thread.
        response = self.patch(
            self.api_link,
            [{"op": "replace", "path": "owner", "value": self.other_user.pk}],
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(),
            {"id": self.thread.pk, "detail": ["Participant doesn't exist."]},
        )
def test_no_permission(self):
"""non-moderator/owner can't change owner"""
ThreadParticipant.objects.set_owner(self.thread, self.other_user)
ThreadParticipant.objects.add_participants(self.thread, [self.user])
response = self.patch(
self.api_link, [{"op": "replace", "path": "owner", "value": | |
# neo4j/aio/__init__.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2019 "Neo4j,"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from asyncio import (
IncompleteReadError,
Lock,
StreamReader,
StreamReaderProtocol,
StreamWriter,
get_event_loop,
wait,
)
from collections import deque
from logging import getLogger
from os import strerror
from random import choice
from ssl import SSLError
from sys import platform, version_info
from time import perf_counter
from neo4j.addressing import Address
from neo4j.aio._collections import WaitingList
from neo4j.aio._mixins import Addressable, Breakable
from neo4j.errors import (
BoltError,
BoltConnectionError,
BoltSecurityError,
BoltConnectionBroken,
BoltHandshakeError,
Neo4jAvailabilityError,
)
from neo4j.api import Version
from neo4j.conf import Config, PoolConfig
from neo4j.meta import version as neo4j_version
from neo4j.routing import RoutingTable
# Module-level logger for Bolt connection lifecycle/debug messages.
log = getLogger(__name__)

# The four "magic" preamble bytes that start every Bolt handshake request.
MAGIC = b"\x60\x60\xB0\x17"
class Bolt(Addressable, object):
#: True if this instance uses secure communication, false
#: otherwise.
secure = None
#: As a class attribute, this denotes the version of Bolt handled
#: by that subclass. As an instance attribute, this represents the
#: version of the protocol in use.
protocol_version = ()
# Record of the time at which this connection was opened.
__t_opened = None
# Handle to the StreamReader object.
__reader = None
# Handle to the StreamWriter object, which can be used on close.
__writer = None
# Flag to indicate that the connection is closed
__closed = False
@classmethod
def default_user_agent(cls):
""" Return the default user agent string for a connection.
"""
template = "neo4j-python/{} Python/{}.{}.{}-{}-{} ({})"
fields = (neo4j_version,) + tuple(version_info) + (platform,)
return template.format(*fields)
@classmethod
def protocol_handlers(cls, protocol_version=None):
""" Return a dictionary of available Bolt protocol handlers,
keyed by version tuple. If an explicit protocol version is
provided, the dictionary will contain either zero or one items,
depending on whether that version is supported. If no protocol
version is provided, all available versions will be returned.
:param protocol_version: tuple identifying a specific protocol
version (e.g. (3, 5)) or None
:return: dictionary of version tuple to handler class for all
relevant and supported protocol versions
:raise TypeError: if protocol version is not passed in a tuple
"""
# Carry out subclass imports locally to avoid circular
# dependency issues.
from neo4j.aio.bolt3 import Bolt3
handlers = {bolt.protocol_version: bolt for bolt in [
# This list can be updated as protocol
# versions are added and removed.
Bolt3,
]}
if protocol_version is None:
return handlers
if not isinstance(protocol_version, tuple):
raise TypeError("Protocol version must be specified as a tuple")
return {version: handler
for version, handler in handlers.items()
if version == protocol_version}
@classmethod
def opener(cls, auth=None, **config):
""" Create and return an opener function for a given set of
configuration parameters. This is useful when multiple servers share
the same configuration details, such as within a connection pool.
"""
async def f(address, *, loop=None):
return await Bolt.open(address, auth=auth, loop=loop, **config)
return f
    @classmethod
    async def open(cls, address, *, auth=None, loop=None, **config):
        """ Open a socket connection and perform protocol version
        negotiation, in order to construct and return a Bolt client
        instance for a supported Bolt protocol version.

        :param address: tuples of host and port, such as
            ("127.0.0.1", 7687)
        :param auth: authentication token, forwarded to ``__ainit__``
        :param loop: event loop; defaults to ``get_event_loop()``
        :param config: keyword settings consumed into a :class:`PoolConfig`
        :return: instance of a Bolt subclass
        :raise BoltConnectionError: if a connection could not be
            established
        :raise BoltConnectionLost: if an I/O error occurs on the
            underlying socket connection
        :raise BoltHandshakeError: if handshake completes without a
            successful negotiation
        :raise TypeError: if any of the arguments provided are passed
            as incompatible types
        :raise ValueError: if any of the arguments provided are passed
            with unsupported values
        """
        # Args
        address = Address(address)
        if loop is None:
            loop = get_event_loop()
        config = PoolConfig.consume(config)
        # Connect
        reader, writer = await cls._connect(address, loop, config)
        try:
            # Handshake selects the Bolt subclass for the agreed version.
            subclass = await cls._handshake(reader, writer, config.protocol_version)
            # Instantiation
            obj = subclass(reader, writer)
            obj.secure = bool(config.secure)
            # Subclasses finish login/initialisation asynchronously.
            assert hasattr(obj, "__ainit__")
            await obj.__ainit__(auth)
            return obj
        except BoltError:
            # Protocol-level failure: shut the transport down before re-raising.
            writer.write_eof()
            writer.close()
            raise
    @classmethod
    async def _connect(cls, address, loop, config):
        """ Attempt to establish a TCP connection to the address
        provided.

        :param address: resolved :class:`Address` to dial
        :param loop: event loop on which to create the connection
        :param config: :class:`Config` carrying the SSL settings
        :return: a 2-tuple of reader and writer for the new connection
            (the original docstring said "3-tuple"; only two values are
            returned)
        :raise BoltConnectionError: if a connection could not be
            established
        :raise BoltSecurityError: if TLS negotiation fails
        """
        assert isinstance(address, Address)
        assert loop is not None
        assert isinstance(config, Config)
        connection_args = {
            "host": address.host,
            "port": address.port,
            "family": address.family,
            # TODO: other args
        }
        ssl_context = config.get_ssl_context()
        if ssl_context:
            # Secure connection requested: enable TLS and SNI/hostname checks.
            connection_args["ssl"] = ssl_context
            connection_args["server_hostname"] = address.host
        log.debug("[#0000] C: <DIAL> %s", address)
        try:
            reader = BoltStreamReader(loop=loop)
            protocol = StreamReaderProtocol(reader, loop=loop)
            transport, _ = await loop.create_connection(lambda: protocol, **connection_args)
            writer = BoltStreamWriter(transport, protocol, reader, loop)
        except SSLError as err:
            log.debug("[#%04X] S: <REJECT> %s (%d %s)", 0, address,
                      err.errno, strerror(err.errno))
            raise BoltSecurityError("Failed to establish a secure connection", address) from err
        except OSError as err:
            log.debug("[#%04X] S: <REJECT> %s (%d %s)", 0, address,
                      err.errno, strerror(err.errno))
            raise BoltConnectionError("Failed to establish a connection", address) from err
        else:
            local_address = Address(transport.get_extra_info("sockname"))
            remote_address = Address(transport.get_extra_info("peername"))
            log.debug("[#%04X] S: <ACCEPT> %s -> %s",
                      local_address.port_number, local_address, remote_address)
            return reader, writer
@classmethod
async def _handshake(cls, reader, writer, protocol_version):
""" Carry out a Bolt handshake, optionally requesting a
specific protocol version.
:param reader:
:param writer:
:param protocol_version:
:return:
:raise BoltConnectionLost: if an I/O error occurs on the
underlying socket connection
:raise BoltHandshakeError: if handshake completes without a
successful negotiation
"""
local_address = Address(writer.transport.get_extra_info("sockname"))
remote_address = Address(writer.transport.get_extra_info("peername"))
handlers = cls.protocol_handlers(protocol_version)
if not handlers:
raise ValueError("No protocol handlers available (requested Bolt %r)", protocol_version)
offered_versions = sorted(handlers.keys(), reverse=True)[:4]
request_data = MAGIC + b"".join(
v.to_bytes() for v in offered_versions).ljust(16, b"\x00")
log.debug("[#%04X] C: <HANDSHAKE> %r", local_address.port_number, request_data)
writer.write(request_data)
await writer.drain()
response_data = await reader.readexactly(4)
log.debug("[#%04X] S: <HANDSHAKE> %r", local_address.port_number, response_data)
try:
agreed_version = Version.from_bytes(response_data)
except ValueError as err:
writer.close()
raise BoltHandshakeError("Unexpected handshake response %r" % response_data,
remote_address, request_data, response_data) from err
try:
subclass = handlers[agreed_version]
except KeyError:
log.debug("Unsupported Bolt protocol version %s", agreed_version)
raise BoltHandshakeError("Unsupported Bolt protocol version",
remote_address, request_data, response_data)
else:
return subclass
    def __new__(cls, reader, writer):
        # Construction happens via __new__ so that handshake-selected
        # subclasses are initialised uniformly before __ainit__ runs.
        obj = super().__new__(cls)
        obj.__t_opened = perf_counter()  # birth time, consumed by the `age` property
        obj.__reader = reader
        obj.__writer = writer
        # Expose local/remote address helpers from the Addressable mixin.
        Addressable.set_transport(obj, writer.transport)
        return obj
def __repr__(self):
return "<Bolt address=%r protocol_version=%r>" % (self.remote_address,
self.protocol_version)
    async def __ainit__(self, auth):
        """ Asynchronous initializer for implementation by subclasses.

        Called by :meth:`open` immediately after construction; the base
        implementation does nothing.

        :param auth: authentication token supplied to :meth:`open`
        """
@property
def age(self):
""" The age of this connection in seconds.
"""
return perf_counter() - self.__t_opened
    @property
    def broken(self):
        """ Flag to indicate whether this connection has been broken
        by the network or remote peer.

        True when either the reader or the writer reports itself broken.
        """
        return self.__reader.broken or self.__writer.broken
    @property
    def closed(self):
        """ Flag to indicate whether this connection has been closed
        locally (set once :meth:`close` completes)."""
        return self.__closed
    async def close(self):
        """ Close the connection.

        Idempotent: calls after the first return immediately. If the
        peer has not already broken the connection, EOF is written
        before the writer is closed.
        """
        if self.closed:
            return
        if not self.broken:
            log.debug("[#%04X] S: <HANGUP>", self.local_address.port_number)
            self.__writer.write_eof()
        self.__writer.close()
        try:
            await self.__writer.wait_closed()
        except BoltConnectionBroken:
            # Connection died while closing; it is closed either way.
            pass
        self.__closed = True
    async def reset(self, force=False):
        """ Reset the connection to a clean state.

        By default, a RESET message will only be sent if required, i.e.
        if the connection is not already in a clean state. If forced,
        this check will be overridden and a RESET will be sent
        regardless.

        NOTE(review): no-op stub in this base class — presumably
        implemented by protocol-specific subclasses (e.g. Bolt3); confirm.
        """
    async def run(self, cypher, parameters=None, discard=False, readonly=False,
                  bookmarks=None, timeout=None, metadata=None):
        """ Run an auto-commit transaction.

        :param cypher:
        :param parameters:
        :param discard:
        :param readonly:
        :param bookmarks:
        :param timeout:
        :param metadata:
        :raise BoltTransactionError: if a transaction cannot be carried
            out at this time

        NOTE(review): no-op stub in this base class — presumably
        implemented by protocol-specific subclasses (e.g. Bolt3); confirm.
        """
    async def begin(self, readonly=False, bookmarks=None,
                    timeout=None, metadata=None):
        """ Begin an explicit transaction.

        :param readonly:
        :param bookmarks:
        :param timeout:
        :param metadata:
        :return:

        NOTE(review): no-op stub in this base class — presumably
        implemented by protocol-specific subclasses (e.g. Bolt3); confirm.
        """
    async def run_tx(self, f, args=None, kwargs=None, readonly=False,
                     bookmarks=None, timeout=None, metadata=None):
        """ Run a transaction function and return the return value from
        that function.

        NOTE(review): no-op stub in this base class — presumably
        implemented by protocol-specific subclasses (e.g. Bolt3); confirm.
        """
async def get_routing_table(self, context=None):
""" Fetch a new routing table.
:param context: the routing context to | |
'wcfg':{'bd':6},
'gridcfg':{'sticky':tkinter.E+tkinter.W, 'column': 3, 'row':-1,'columnspan':3},
'command':self.Close_cb})
def setNumEvals(self, event=None):
t = self.ifd.entryByName['run_type']['widget'].get()
self.ga_num_evals.set(self.runDict[t])
self.runType.set(t)
def set_crossover_mode(self, event=None):
t = self.ifd.entryByName['ga_crossovermodeChoices']['widget'].get()
self.crossover_modeType.set(t)
def Accept_cb(self, event=None):
changeVals = {}
for item in ['ga_run', 'ga_pop_size', 'ga_num_evals',\
'ga_num_generations', 'ga_elitism', 'ga_mutation_rate', \
'ga_crossover_rate', 'ga_cauchy_alpha', 'ga_cauchy_beta',\
'ga_window_size']:
var = eval('self.'+item)
if self.vf.dpo[item]['value']!= var.get():
changeVals[item] = var.get()
if self.crossover_modeType.get()!=self.vf.dpo['ga_crossover_mode']['value']:
print("changing ga_crossover_mode from ", self.vf.dpo['ga_crossover_mode']['value'], ' to ', self.crossover_modeType.get())
changeVals['ga_crossover_mode_flag']=1
changeVals['ga_crossover_mode']= self.crossover_modeType.get()
if len(list(changeVals.keys()))>0:
changeVals['topCommand'] = 0
self.doitWrapper(*(), **changeVals)
self.form.withdraw()
    def Close_cb(self, event=None):
        # Hide the form without applying any changes.
        self.form.withdraw()
    def doit(self, *args, **kw):
        # Forward all keyword changes to the shared dpo-setting command.
        self.vf.ADdpf_setDpo(*(), **kw)
# Register the GA parameter panel under the AutoTools menu bar.
GAGUI=CommandGUI()
GAGUI.addMenuCommand('AutoToolsBar', menuText['AutoDpfMB'],menuText['GA'], cascadeName = menuText['SetSearchParmsMB'])
class LS(MVCommand):
    """Allows the user to set parameters for a local-search-based AutoDock job."""

    def guiCallback(self):
        """Called each time the 'set other options' button is pressed.

        Builds the form on first use, then refreshes every form variable
        from the current dpo values.
        """
        if not hasattr(self, 'form'):
            self.buildForm()
            self.form = self.vf.getUserInput(self.ifd, modal=0, blocking=0)
            self.form.root.protocol('WM_DELETE_WINDOW',self.Close_cb)
        else:
            self.form.root.deiconify()
        # Integer-valued parameters are copied straight from the dpo.
        intItems = ['sw_max_its', 'sw_max_succ', 'sw_max_fail', 'do_local_only']
        intVars = [self.sw_max_its, self.sw_max_succ, self.sw_max_fail,
                   self.do_local_only]
        for var, item in zip(intVars, intItems):
            var.set(self.vf.dpo[item]['value'])
        # Float/flag parameters are displayed through their str() form.
        floatItems = ['sw_rho', 'sw_lb_rho', 'ls_search_freq',
                      'set_psw1', 'set_sw1']
        floatVars = [self.sw_rho, self.sw_lb_rho, self.ls_search_freq,
                     self.set_psw1, self.set_sw1]
        for var, item in zip(floatVars, floatItems):
            var.set(str(self.vf.dpo[item]['value']))

    def buildForm(self):
        """Create the Tk variables and the InputFormDescr for the LS panel."""
        self.do_local_only=tkinter.StringVar(master=self.vf.GUI.ROOT)
        self.sw_max_its=tkinter.StringVar(master=self.vf.GUI.ROOT)
        self.sw_max_succ=tkinter.StringVar(master=self.vf.GUI.ROOT)
        self.sw_max_fail=tkinter.StringVar(master=self.vf.GUI.ROOT)
        self.sw_rho=tkinter.StringVar(master=self.vf.GUI.ROOT)
        self.sw_lb_rho=tkinter.StringVar(master=self.vf.GUI.ROOT)
        self.ls_search_freq=tkinter.StringVar(master=self.vf.GUI.ROOT)
        self.set_psw1=tkinter.IntVar(master=self.vf.GUI.ROOT)
        self.set_sw1=tkinter.StringVar(master=self.vf.GUI.ROOT)
        ifd = self.ifd = InputFormDescr(title = "Local Search Parameters:")
        ifd.append( {'name': 'do_local_onlyEnt',
            'widgetType':tkinter.Entry,
            'wcfg':{
                'label': 'Number of LS Runs:',
                'textvariable': self.do_local_only,
            },
            'gridcfg':{'sticky':tkinter.E, 'columnspan':2}})
        ifd.append( {'name': 'sw_max_itsEnt',
            'widgetType':tkinter.Entry,
            'wcfg':{
                'label': 'Maximum Number of iterations:',
                'textvariable': self.sw_max_its,
            },
            'gridcfg':{'sticky':tkinter.E, 'columnspan':2}})
        ifd.append( {'name': 'sw_max_succEnt',
            'widgetType':tkinter.Entry,
            'wcfg':{
                'label': 'Maximum Number of successes in a row\nbefore changing rho:',
                'textvariable': self.sw_max_succ,
            },
            'gridcfg':{'sticky':tkinter.E, 'columnspan':2}})
        ifd.append( {'name': 'sw_max_failEnt',
            'widgetType':tkinter.Entry,
            'wcfg':{
                'label': 'Maximum Number of failures in a row\nbefore changing rho:',
                'textvariable': self.sw_max_fail,
            },
            'gridcfg':{'sticky':tkinter.E, 'columnspan':2}})
        ifd.append( {'name': 'sw_rhoEnt',
            'widgetType':tkinter.Entry,
            'wcfg':{
                'label': 'Solis&Wets parameter defining initial variance\nand size of local space to sample (rho):',
                'textvariable': self.sw_rho,
            },
            'gridcfg':{'sticky':tkinter.E, 'columnspan':2}})
        ifd.append( {'name': 'sw_lb_rhoEnt',
            'widgetType':tkinter.Entry,
            'wcfg':{
                'label': 'Lower bound on rho:',
                'textvariable': self.sw_lb_rho,
            },
            'gridcfg':{'sticky':tkinter.E, 'columnspan':2}})
        ifd.append( {'name': 'ls_search_freqEnt',
            'widgetType':tkinter.Entry,
            'wcfg':{
                'label': 'Probability of any particular phenotype being\nsubjected to local search:',
                'textvariable': self.ls_search_freq,
            },
            'gridcfg':{'sticky':tkinter.E, 'columnspan':2}})
        ifd.append( {'name': 'lsChoiceLab',
            'widgetType':tkinter.Label,
            'text': 'FOR LOCAL SEARCH, USE: ',
            'gridcfg':{'sticky':tkinter.W + tkinter.E, 'columnspan':2}})
        # set_psw1 doubles as the radio-button variable: 0 = classic
        # Solis & Wets, 1 = pseudo-Solis & Wets.
        ifd.append( {'name': 'swLab',
            'widgetType':tkinter.Label,
            'text': 'Solis & Wets with uniform variances:',
            'gridcfg':{'sticky':tkinter.E}})
        ifd.append({'name': 'swRb',
            'widgetType':tkinter.Radiobutton,
            'wcfg': {'value':0},
            'variable': self.set_psw1,
            'gridcfg':{'sticky':tkinter.W, 'row':-1,'column':1}})
        ifd.append( {'name': 'pswLab',
            'widgetType':tkinter.Label,
            'text': 'pseudo-Solis & Wets with relative variances:',
            'gridcfg':{'sticky':tkinter.E}})
        ifd.append({'name': 'psw',
            'widgetType':tkinter.Radiobutton,
            'wcfg': {'value':1},
            'variable': self.set_psw1,
            'gridcfg':{'sticky':tkinter.W, 'row':-1}})
        ifd.append({'name': 'acceptB',
            'widgetType': tkinter.Button,
            'text':'Accept',
            'wcfg':{'bd':4},
            'gridcfg':{'sticky':tkinter.E+tkinter.W, 'columnspan':3},
            'command':self.Accept_cb})
        ifd.append({'widgetType': tkinter.Button,
            'text':'Close',
            'wcfg':{'bd':6},
            'gridcfg':{'sticky':tkinter.E+tkinter.W, 'row':-1, 'column':3,'columnspan':3},
            'command':self.Close_cb})

    def Accept_cb(self, event=None):
        """Apply local-search parameter values that differ from the dpo."""
        changeVals = {}
        # Integer-valued entries.
        for item in ['do_local_only', 'sw_max_its', 'sw_max_succ',
                     'sw_max_fail']:
            # getattr replaces the original eval('self.'+item).
            val = int(getattr(self, item).get())
            if self.vf.dpo[item]['value'] != val:
                changeVals[item] = val
        # Float-valued entries.
        for item in ['sw_rho', 'sw_lb_rho', 'ls_search_freq']:
            val = float(getattr(self, item).get())
            if self.vf.dpo[item]['value'] != val:
                changeVals[item] = val
        # set_psw1/set_sw1 are mutually exclusive booleans, handled specially.
        if self.set_psw1.get():
            if self.vf.dpo['set_psw1']['value'] == 0:
                changeVals['set_psw1'] = 1
                changeVals['set_sw1'] = 0
        else:
            if self.vf.dpo['set_psw1']['value'] == 1:
                changeVals['set_psw1'] = 0
                changeVals['set_sw1'] = 1
        if changeVals:
            changeVals['topCommand'] = 0
            self.doitWrapper(**changeVals)
        self.form.withdraw()

    def Close_cb(self, event=None):
        # Hide the form without applying any changes.
        self.form.withdraw()

    def doit(self, *args, **kw):
        # Forward all keyword changes to the shared dpo-setting command.
        self.vf.ADdpf_setDpo(*(), **kw)
# Register the Local Search parameter panel under the AutoTools menu bar.
LSGUI=CommandGUI()
LSGUI.addMenuCommand('AutoToolsBar', menuText['AutoDpfMB'],'Local Search Parameters ', cascadeName = menuText['SetSearchParmsMB'])
class SetDockingRunParms(MVCommand):
""" allows user to set these parameters for autodock job: step sizes, energy parameters and format for the output"""
    def guiCallback(self):
        """called each time the 'Set Docking Run Parameters' button is selected"""
        if not hasattr(self, 'form'):
            self.buildForm()
            #self.form = self.vf.getUserInput(self.ifd, modal=0, blocking=0)
            #self.form.root.protocol('WM_DELETE_WINDOW',self.Close_cb)
            #self.ranNumLib.set(1)
            #self.ranNumVar1.set(1)
            #self.ranNumVar2.set(2)
        else:
            self.form.root.deiconify()
        # Mirror the two stored random seeds into the radio-button state:
        # 'time' and 'pid' are symbolic seeds ('1'/'2'); anything else is a
        # user-defined numeric seed ('0').
        self.seed1.set(self.vf.dpo['seed']['value'][0])
        self.seed2.set(self.vf.dpo['seed']['value'][1])
        if self.seed1.get()=='time':
            self.ranNumVar1.set('1')
        elif self.seed1.get()=='pid':
            self.ranNumVar1.set('2')
        else:
            self.ranNumVar1.set('0')
        if self.seed2.get()=='time':
            self.ranNumVar2.set('1')
        elif self.seed2.get()=='pid':
            self.ranNumVar2.set('2')
        else:
            self.ranNumVar2.set('0')
        # An empty second seed selects the built-in library (one seed);
        # otherwise the platform-independent library (two seeds) is assumed.
        if self.seed2.get()=='':
            self.ranNumLib.set(1)
        else:
            self.ranNumLib.set(2)
        self.set_seeds()
        # Hide the user-defined seed entries until 'user defined' is chosen.
        for item in ['userSeedLab1','userSeedEnt1','userSeedLab2','userSeedEnt2']:
            self.ifd.entryByName[item]['widget'].grid_forget()
        #??intelec, rmsnosym
        #first set the ints:
        itemList = ['outlev', 'analysis', 'write_all_flag','intelec','rmsref_flag']
        varList = [self.outlev, self.analysis, self.write_all_flag, self.intelec, self.rmsref_flag]
        for i in range(len(itemList)):
            varList[i].set(self.vf.dpo[itemList[i]]['value'])
        #update write_all_flag
        #self.vf.dpo['write_all_flag']['value'] = self.vf.dpo['write_all_flag']['value']
        itemList2 = ['extnrg', 'rmstol','dstep','qstep', 'rmsref']
        varList2 = [self.extnrg, self.rmstol, self.dstep, self.qstep, self.rmsref]
        for i in range(len(itemList2)):
            varList2[i].set(str(self.vf.dpo[itemList2[i]]['value']))
        ##first set the ints:
        #for item in ['outlev']:
            ##setattr(self,item,self.vf.dpo[item]['value'])
            #exec('self.'+item+'.set(self.vf.dpo[\''+item+"\']['value'])")
        #next set the floats:
        #for item in ['extnrg', 'rmstol','dstep','qstep']:
            #exec('self.'+item+'.set(str(self.vf.dpo[\''+item+"\']['value']))")
            ##setattr(self,item,self.vf.dpo[item]['value'])
        #next set the strings:
        #for item in ['rmsref']:
            #exec('self.'+item+'.set(str(self.vf.dpo[\''+item+"\']['value']))")
            ##setattr(self,item,self.vf.dpo[item]['value'])
        #next set the booleans:
        #for item in ['analysis', 'write_all_flag']:
            #exec('self.'+item+'.set(self.vf.dpo[\''+item+"\']['value'])")
            ##setattr(self,item,self.vf.dpo[item]['value'])
        #last fudge the lists:
        #oldval = self.vf.dpo['tstep']['value']
        #newval = ''
        #for item in oldval:
        #    newval = newval + str(item) + ','
        #newval = newval[:-1]
        newval = self.vf.dpo['tstep']['value']
        self.tstep.set(newval)
        # e0max stores a 2-element value: [max initial energy, max retries].
        oldval = self.vf.dpo['e0max']['value']
        self.e0max.set(str(oldval[0]))
        self.emaxRetries.set(str(oldval[1]))
def buildForm(self):
ifd = self.ifd = InputFormDescr(title = "Set Docking Run Options")
#ranNumLib: 2 is platform independent one from UTexasBiomedicalSchool
#ranNumLib: 1 is system's own implementation
self.showLibOpts = tkinter.IntVar(master=self.vf.GUI.ROOT)
self.showEnergyOpts = tkinter.IntVar(master=self.vf.GUI.ROOT)
self.showStepSizeOpts = tkinter.IntVar(master=self.vf.GUI.ROOT)
self.showOutputOpts = tkinter.IntVar(master=self.vf.GUI.ROOT)
for v in [self.showLibOpts, self.showEnergyOpts, self.showStepSizeOpts,
self.showOutputOpts]:
v.set(0)
self.ranNumLib=tkinter.IntVar(master=self.vf.GUI.ROOT)
self.ranNumVar1=tkinter.StringVar(master=self.vf.GUI.ROOT)
self.ranNumVar2=tkinter.StringVar(master=self.vf.GUI.ROOT)
self.seed1=tkinter.StringVar(master=self.vf.GUI.ROOT)
self.seed2=tkinter.StringVar(master=self.vf.GUI.ROOT)
self.intelec=tkinter.StringVar(master=self.vf.GUI.ROOT)
self.extnrg=tkinter.StringVar(master=self.vf.GUI.ROOT)
self.e0max=tkinter.StringVar(master=self.vf.GUI.ROOT)
self.emaxRetries=tkinter.StringVar(master=self.vf.GUI.ROOT)
self.tstep = tkinter.StringVar(master=self.vf.GUI.ROOT)
self.qstep = tkinter.StringVar(master=self.vf.GUI.ROOT)
self.dstep = tkinter.StringVar(master=self.vf.GUI.ROOT)
self.outlev=tkinter.IntVar(master=self.vf.GUI.ROOT)
self.rmstol=tkinter.StringVar(master=self.vf.GUI.ROOT)
self.rmsref=tkinter.StringVar(master=self.vf.GUI.ROOT)
self.rmsref_flag=tkinter.IntVar(master=self.vf.GUI.ROOT)
self.rmsref_flag.set(0)
self.rmsnosym=tkinter.StringVar(master=self.vf.GUI.ROOT)
self.analysis=tkinter.IntVar(master=self.vf.GUI.ROOT)
self.write_all_flag=tkinter.IntVar(master=self.vf.GUI.ROOT)
self.write_all_flag.set(0)
ifd.append({'widgetType': tkinter.Label,
'text':'for random number generator:',
'wcfg':{'bd':1},
'gridcfg':{'sticky':tkinter.W, 'columnspan':1}})
ifd.append( {'name': 'LibOpts1',
'widgetType':tkinter.Radiobutton,
'variable': self.showLibOpts,
'wcfg': {
'text': 'Use defaults',
'value':0,
'command': self.hideLibOpts,
},
'gridcfg':{'row':-1, 'column':1, 'columnspan':2}})
#'gridcfg':{'sticky':Tkinter.W, 'columnspan':2}})
ifd.append( {'name': 'LibOpts2',
'widgetType':tkinter.Radiobutton,
'variable': self.showLibOpts,
'wcfg': {
'text': 'Select library + set seeds',
'value':1,
'command': self.hideLibOpts,
},
'gridcfg':{'row': -1, 'sticky':tkinter.W, 'column':3, 'columnspan':3}})
ifd.append({'name': 'ranLibLabel',
'widgetType': tkinter.Label,
'text':'For RANDOM NUMBER GENERATOR LIBRARY:',
'wcfg':{'bd':6},
'gridcfg':{'sticky':tkinter.W, 'columnspan':6}})
ifd.append( {'name': 'sysRanNumLibRB1',
'widgetType':tkinter.Radiobutton,
'variable': self.ranNumLib,
'text': 'Built-In Library',
'wcfg': {'value':1},
'command': self.set_seeds,
'gridcfg':{'sticky':tkinter.W}})
ifd.append( {'name': 'indRanNumLibRB1',
'widgetType':tkinter.Radiobutton,
'variable': self.ranNumLib,
'text': 'Platform-Independent Library\n(from UTexas Biomedical School):',
'wcfg': {'value':2},
'command': self.set_seeds,
'gridcfg':{'sticky':tkinter.W,'row':-1,'column':1, 'columnspan':5}})
ifd.append( {'name': 'ranNumChoiceLab',
'widgetType':tkinter.Label,
'text': 'SELECT ONE RANDOM NUMBER GENERATOR SEED:',
'gridcfg':{'sticky':tkinter.W + tkinter.E, 'columnspan':6}})
ifd.append({'name': 'time1',
'widgetType':tkinter.Radiobutton,
'wcfg': {'value':'1'},
'text': 'time',
'variable': self.ranNumVar1,
'gridcfg':{'sticky':tkinter.W},
'command': self.getUserSeed1 })
ifd.append({'name': 'pid1',
'widgetType':tkinter.Radiobutton,
'wcfg': {'value':'2'},
'text': 'pid',
'variable': self.ranNumVar1,
'gridcfg':{'sticky':tkinter.W, 'row':-1,'column':1, 'columnspan':2},
'command': self.getUserSeed1 })
ifd.append({'name': 'userSeedRb1',
'widgetType':tkinter.Radiobutton,
'wcfg': {'value':'0'},
'text': 'user defined',
'variable': self.ranNumVar1,
'gridcfg':{'sticky':tkinter.W, 'row':-1,'columnspan':2, 'column':4},
'command': self.getUserSeed1 })
ifd.append({'name': 'userSeedLab1',
'widgetType':tkinter.Label,
'text': 'Enter Seed 1:',
'gridcfg':{'sticky':tkinter.W+tkinter.E, 'columnspan':4}})
ifd.append( {'name': 'userSeedEnt1',
'widgetType':tkinter.Entry,
'wcfg':{ 'textvariable': self.seed1},
'gridcfg':{'sticky':tkinter.W, 'row':-1, 'column':4}})
ifd.append({'name': 'time2',
'widgetType':tkinter.Radiobutton,
'wcfg': {'value':'1'},
'text': 'time',
'variable': self.ranNumVar2,
'gridcfg':{'sticky':tkinter.W},
'command': self.getUserSeed2 })
ifd.append({'name': 'pid2',
'widgetType':tkinter.Radiobutton,
'text': 'pid',
'wcfg': {'value':'2'},
'variable': self.ranNumVar2,
'gridcfg':{'sticky':tkinter.W, 'row':-1,'column':1,'columnspan':2},
'command': self.getUserSeed2 })
ifd.append({'name': 'userSeedRb2',
'widgetType':tkinter.Radiobutton,
'wcfg': {'value':'0'},
'text': 'user defined',
'variable': self.ranNumVar2,
'gridcfg':{'sticky':tkinter.W, 'row':-1, 'column':4,'columnspan':2},
'command': self.getUserSeed2 })
ifd.append({'name': 'userSeedLab2',
'widgetType':tkinter.Label,
'text': 'Enter Seed 2:',
'gridcfg':{'sticky':tkinter.W+tkinter.E, 'columnspan':4}})
ifd.append( {'name': 'userSeedEnt2',
'widgetType':tkinter.Entry,
'wcfg':{'textvariable': self.seed2},
'gridcfg':{'sticky':tkinter.W, 'row':-1, 'column':4}})
ifd.append({'widgetType': tkinter.Label,
'text':'_______________________________________',
'wcfg':{'bd':1},
'gridcfg':{'sticky':tkinter.E+tkinter.W, 'columnspan':6}})
ifd.append({'widgetType': tkinter.Label,
'text':'for energy parameters:',
'wcfg':{'bd':1},
'gridcfg':{'sticky':tkinter.W, 'columnspan':1}})
ifd.append( {'name': 'EnergyOpts1',
'widgetType':tkinter.Radiobutton,
'variable': self.showEnergyOpts,
'wcfg': {
'text': 'Use defaults',
'value':0,
'command': self.hideEnergyOpts,
},
'gridcfg':{'row':-1, 'column':1, 'columnspan':2}})
#'gridcfg':{'sticky':Tkinter.W, 'columnspan':2}})
ifd.append( {'name': 'EnergyOpts2',
'widgetType':tkinter.Radiobutton,
'variable': self.showEnergyOpts,
'wcfg': {
'text': 'Customize energy parameters',
'value':1,
'command': self.hideEnergyOpts,
},
#'gridcfg':{'row': -1, 'column':3, 'columnspan':2}})
'gridcfg':{'row': -1, 'sticky':tkinter.W, 'column':3, 'columnspan':3}})
ifd.append({'name': 'energyLabel',
'widgetType': tkinter.Label,
'text':'ENERGY PARAMETERS:',
'wcfg':{'bd':1},
'gridcfg':{'sticky':tkinter.W+ tkinter.E, 'columnspan':6}})
ifd.append( {'name': 'extnrgLab',
'widgetType':tkinter.Label,
'wcfg':{
'text': 'External Grid Energy',
},
'gridcfg':{'sticky':tkinter.E, 'columnspan':4}})
ifd.append( {'name': 'extnrgEnt',
'widgetType':tkinter.Entry,
'wcfg':{
'textvariable': self.extnrg
},
'gridcfg':{'sticky':tkinter.W,'row':-1, 'column':5, 'columnspan':2}})
ifd.append( {'name': 'e0maxLab',
'widgetType':tkinter.Label,
'wcfg':{
'text': 'Maximum allowable initial energy:',
},
'gridcfg':{'sticky':tkinter.E, 'columnspan':4}})
ifd.append( {'name': 'e0maxEnt',
'widgetType':tkinter.Entry,
'wcfg':{
'textvariable': self.e0max
},
'gridcfg':{'sticky':tkinter.W,'row':-1, 'column':5, 'columnspan':2}})
#'gridcfg':{'sticky':Tkinter.E, 'columnspan':6}})
ifd.append( {'name': 'emaxRetriesLab',
'widgetType':tkinter.Label,
'wcfg':{
'text': 'Maximum Number of Retries:',
},
'gridcfg':{'sticky':tkinter.E, 'columnspan':4}})
ifd.append( {'name': 'emaxRetriesEnt',
'widgetType':tkinter.Entry,
'wcfg':{
'textvariable': self.emaxRetries
},
'gridcfg':{'sticky':tkinter.W,'row':-1, 'column':5, 'columnspan':2}})
#'gridcfg':{'sticky':Tkinter.E, 'columnspan':6}})
ifd.append( {'name': 'intelecLab1',
'widgetType':tkinter.Label,
'text': 'Calculate internal electrostatic energy:',
'gridcfg':{'sticky':tkinter.E, 'columnspan':2}})
ifd.append( {'name': 'intelecRB1',
'widgetType':tkinter.Radiobutton,
'variable': self.intelec,
'text': 'Yes',
'wcfg': {'value':'1'},
'gridcfg':{'sticky':tkinter.E,'row':-1, 'columnspan':2,'column':2}})
ifd.append( {'name': 'intelecRB0',
'widgetType':tkinter.Radiobutton,
'variable': self.intelec,
'text': 'No',
'wcfg': {'value':'0'},
'gridcfg':{'sticky':tkinter.E + tkinter.W,'row':-1,'columnspan':2, 'column':4}})
ifd.append({'widgetType': tkinter.Label,
'text':'_______________________________________',
'wcfg':{'bd':1},
'gridcfg':{'sticky':tkinter.E+tkinter.W, 'columnspan':6}})
ifd.append({'widgetType': tkinter.Label,
'text':'for step size parameters:',
'wcfg':{'bd':1},
'gridcfg':{'sticky':tkinter.W, 'columnspan':1}})
ifd.append( {'name': 'StepSizeOpts1',
'widgetType':tkinter.Radiobutton,
'variable': self.showStepSizeOpts,
'wcfg': {
'text': 'Use defaults',
'value':0,
'command': self.hideStepSizeOpts,
},
'gridcfg':{'row':-1, 'column':1, 'columnspan':2}})
#'gridcfg':{'sticky':Tkinter.W, 'columnspan':2}})
ifd.append( {'name': 'StepSizeOpts2',
'widgetType':tkinter.Radiobutton,
'variable': self.showStepSizeOpts,
'wcfg': {
'text': 'Customize step size parameters',
'value':1,
'command': self.hideStepSizeOpts,
},
#'gridcfg':{'row': -1, 'column':3, 'columnspan':2}})
'gridcfg':{'row': -1, 'sticky':tkinter.W, 'column':3, 'columnspan':3}})
ifd.append({'name':'stepSizeLab',
'widgetType': tkinter.Label,
'text':'STEP SIZE PARAMETERS:',
'wcfg':{'bd':1},
'gridcfg':{'sticky':tkinter.W+tkinter.E, 'columnspan':6}})
ifd.append( {'name': 'tstepLab',
'widgetType':tkinter.Label,
'wcfg':{
'text': 'Translation (Angstrom/step):\nEnter values for 1st , last cycles to have AutoDock calculate trnrf',
},
'gridcfg':{'sticky':tkinter.E, | |
# Copyright (c) 2022, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import cupy as cp
import numpy as np
from pylibcugraph import (ResourceHandle,
GraphProperties,
SGGraph,
node2vec)
from cugraph.testing import utils
import cugraph
# Both result layouts exercised by the tests: dense (False) and compressed.
COMPRESSED = [False, True]
# Path to a small line-graph CSV in the RAPIDS dataset tree.
LINE = utils.RAPIDS_DATASET_ROOT_DIR_PATH/"small_line.csv"
# =============================================================================
# Test data
# =============================================================================
# The result names correspond to the datasets defined in conftest.py
# Note: the only deterministic path(s) in the following datasets
# are contained in Simple_1
_test_data = {"karate.csv": {
                  "seeds": cp.asarray([0, 0], dtype=np.int32),
                  "paths": cp.asarray([0, 8, 33, 29, 26, 0, 1, 3, 13, 33],
                                      dtype=np.int32),
                  "weights": cp.asarray([1., 1., 1., 1., 1., 1., 1., 1.],
                                        dtype=np.float32),
                  "path_sizes": cp.asarray([5, 5], dtype=np.int32),
                  "max_depth": 5
              },
              "dolphins.csv": {
                  "seeds": cp.asarray([11], dtype=np.int32),
                  "paths": cp.asarray([11, 51, 11, 51],
                                      dtype=np.int32),
                  "weights": cp.asarray([1., 1., 1.],
                                        dtype=np.float32),
                  "path_sizes": cp.asarray([4], dtype=np.int32),
                  "max_depth": 4
              },
              "Simple_1": {
                  "seeds": cp.asarray([0, 3], dtype=np.int32),
                  "paths": cp.asarray([0, 1, 2, 3],
                                      dtype=np.int32),
                  "weights": cp.asarray([1., 1.],
                                        dtype=np.float32),
                  "path_sizes": cp.asarray([3, 1], dtype=np.int32),
                  "max_depth": 3
              },
              "Simple_2": {
                  "seeds": cp.asarray([0, 3], dtype=np.int32),
                  "paths": cp.asarray([0, 1, 3, 5, 3, 5],
                                      dtype=np.int32),
                  "weights": cp.asarray([0.1, 2.1, 7.2, 7.2],
                                        dtype=np.float32),
                  "path_sizes": cp.asarray([4, 2], dtype=np.int32),
                  "max_depth": 4
              },
              }
# =============================================================================
# Test helpers
# =============================================================================
def _get_param_args(param_name, param_values):
    """
    Returns a tuple of (<param_name>, <pytest.param list>) which can be applied
    as the args to pytest.mark.parametrize(). The pytest.param list also
    contains a param id string formed from the param name and values.
    """
    return (param_name,
            [pytest.param(v, id=f"{param_name}={v}") for v in param_values])
def _run_node2vec(src_arr,
                  dst_arr,
                  wgt_arr,
                  seeds,
                  num_vertices,
                  num_edges,
                  max_depth,
                  compressed_result,
                  p,
                  q,
                  renumbered):
    """
    Builds a graph from the input arrays and runs node2vec using the other args
    to this function, then checks the output for validity.

    Raises ValueError when a returned edge weight does not match the weight of
    the corresponding edge in the input edge list.
    """
    resource_handle = ResourceHandle()
    graph_props = GraphProperties(is_symmetric=False, is_multigraph=False)
    G = SGGraph(resource_handle, graph_props, src_arr, dst_arr, wgt_arr,
                store_transposed=False, renumber=renumbered,
                do_expensive_check=True)
    (paths, weights, sizes) = node2vec(resource_handle, G, seeds, max_depth,
                                       compressed_result, p, q)
    num_seeds = len(seeds)
    # Validate node2vec results by checking every edge of every path against
    # a dense adjacency matrix built from the input edge list.
    M = np.zeros((num_vertices, num_vertices), dtype=np.float64)
    h_src_arr = src_arr.get()
    h_dst_arr = dst_arr.get()
    h_wgt_arr = wgt_arr.get()
    h_paths = paths.get()
    h_weights = weights.get()
    for i in range(num_edges):
        M[h_src_arr[i]][h_dst_arr[i]] = h_wgt_arr[i]
    if compressed_result:
        # Paths are concatenated; derive per-path offsets from the sizes
        # array. Hoist sizes.get() out of the loop (was fetched per seed).
        h_sizes = sizes.get()
        path_offsets = np.zeros(num_seeds + 1, dtype=np.int32)
        path_offsets[0] = 0
        for i in range(num_seeds):
            path_offsets[i + 1] = path_offsets[i] + h_sizes[i]
        for i in range(num_seeds):
            for j in range(path_offsets[i], (path_offsets[i + 1] - 1)):
                # Path i of length L contributes L-1 weights, hence j - i.
                actual_wgt = h_weights[j - i]
                expected_wgt = M[h_paths[j]][h_paths[j + 1]]
                if pytest.approx(expected_wgt, 1e-4) != actual_wgt:
                    s = h_paths[j]
                    d = h_paths[j + 1]
                    raise ValueError(f"Edge ({s},{d}) has wgt {actual_wgt}, "
                                     f"should have been {expected_wgt}")
    else:
        # Dense layout: every path is padded to max_path_length, with
        # num_vertices used as the "no vertex" sentinel.
        max_path_length = int(len(paths) / num_seeds)
        for i in range(num_seeds):
            for j in range(max_path_length - 1):
                curr_idx = i * max_path_length + j
                next_idx = i * max_path_length + j + 1
                if (h_paths[next_idx] != num_vertices):
                    actual_wgt = h_weights[i * (max_path_length - 1) + j]
                    expected_wgt = M[h_paths[curr_idx]][h_paths[next_idx]]
                    if pytest.approx(expected_wgt, 1e-4) != actual_wgt:
                        # Bug fix: report the vertices that were actually
                        # compared (curr_idx/next_idx); the original used the
                        # path-local indices j/j+1 and named the wrong edge.
                        s = h_paths[curr_idx]
                        d = h_paths[next_idx]
                        raise ValueError(f"Edge ({s},{d}) has wgt {actual_wgt}"
                                         f", should have been {expected_wgt}")
# =============================================================================
# Pytest fixtures
# =============================================================================
# fixtures used in this test module are defined in conftest.py
# =============================================================================
# Tests adapted from libcugraph
# =============================================================================
def test_node2vec_short():
    """node2vec from vertex 0 on a small 6-vertex digraph, dense result."""
    edge_src = cp.asarray([0, 1, 1, 2, 2, 2, 3, 4], dtype=np.int32)
    edge_dst = cp.asarray([1, 3, 4, 0, 1, 3, 5, 5], dtype=np.int32)
    edge_wgt = cp.asarray([0.1, 2.1, 1.1, 5.1, 3.1, 4.1, 7.2, 3.2],
                          dtype=np.float32)
    start_verts = cp.asarray([0, 0], dtype=np.int32)
    _run_node2vec(edge_src, edge_dst, edge_wgt, start_verts,
                  num_vertices=6, num_edges=8, max_depth=4,
                  compressed_result=False, p=0.8, q=0.5, renumbered=False)
def test_node2vec_short_dense():
    """node2vec from vertices 2 and 3 on the small digraph, dense result."""
    edge_src = cp.asarray([0, 1, 1, 2, 2, 2, 3, 4], dtype=np.int32)
    edge_dst = cp.asarray([1, 3, 4, 0, 1, 3, 5, 5], dtype=np.int32)
    edge_wgt = cp.asarray([0.1, 2.1, 1.1, 5.1, 3.1, 4.1, 7.2, 3.2],
                          dtype=np.float32)
    start_verts = cp.asarray([2, 3], dtype=np.int32)
    _run_node2vec(edge_src, edge_dst, edge_wgt, start_verts,
                  num_vertices=6, num_edges=8, max_depth=4,
                  compressed_result=False, p=0.8, q=0.5, renumbered=False)
def test_node2vec_short_sparse():
    """node2vec from vertices 2 and 3 on the small digraph, compressed result."""
    edge_src = cp.asarray([0, 1, 1, 2, 2, 2, 3, 4], dtype=np.int32)
    edge_dst = cp.asarray([1, 3, 4, 0, 1, 3, 5, 5], dtype=np.int32)
    edge_wgt = cp.asarray([0.1, 2.1, 1.1, 5.1, 3.1, 4.1, 7.2, 3.2],
                          dtype=np.float32)
    start_verts = cp.asarray([2, 3], dtype=np.int32)
    _run_node2vec(edge_src, edge_dst, edge_wgt, start_verts,
                  num_vertices=6, num_edges=8, max_depth=4,
                  compressed_result=True, p=0.8, q=0.5, renumbered=False)
@pytest.mark.parametrize(*_get_param_args("compress_result", [True, False]))
@pytest.mark.parametrize(*_get_param_args("renumbered", [True, False]))
def test_node2vec_karate(compress_result, renumbered):
    """node2vec on the (symmetrized, unit-weight) karate-club edge list."""
    num_edges = 156
    num_vertices = 34
    src = cp.asarray([1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31,
                      2, 3, 7, 13, 17, 19, 21, 30, 3, 7, 8, 9, 13, 27, 28,
                      32, 7, 12, 13, 6, 10, 6, 10, 16, 16, 30, 32, 33, 33,
                      33, 32, 33, 32, 33, 32, 33, 33, 32, 33, 32, 33, 25, 27,
                      29, 32, 33, 25, 27, 31, 31, 29, 33, 33, 31, 33, 32, 33,
                      32, 33, 32, 33, 33, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                      0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
                      2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 8, 8, 8, 9, 13, 14,
                      14, 15, 15, 18, 18, 19, 20, 20, 22, 22, 23, 23, 23, 23,
                      23, 24, 24, 24, 25, 26, 26, 27, 28, 28, 29, 29, 30, 30,
                      31, 31, 32],
                     dtype=np.int32)
    # dst is src with each edge reversed (both directions are present).
    dst = cp.asarray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,
                      1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 4,
                      4, 5, 5, 5, 6, 8, 8, 8, 9, 13, 14, 14, 15, 15, 18, 18,
                      19, 20, 20, 22, 22, 23, 23, 23, 23, 23, 24, 24, 24, 25,
                      26, 26, 27, 28, 28, 29, 29, 30, 30, 31, 31, 32, 1, 2,
                      3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21, 31, 2, 3,
                      7, 13, 17, 19, 21, 30, 3, 7, 8, 9, 13, 27, 28, 32, 7,
                      12, 13, 6, 10, 6, 10, 16, 16, 30, 32, 33, 33, 33, 32,
                      33, 32, 33, 32, 33, 33, 32, 33, 32, 33, 25, 27, 29, 32,
                      33, 25, 27, 31, 31, 29, 33, 33, 31, 33, 32, 33, 32, 33,
                      32, 33, 33],
                     dtype=np.int32)
    # All edges carry weight 1.0.
    wgt = cp.asarray([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                      1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                      1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                      1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                      1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                      1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                      1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                      1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                      1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                      1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                      1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                      1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                      1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                      1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                      1.0, 1.0],
                     dtype=np.float32)
    seeds = cp.asarray([12, 28, 20, 23, 15, 26], dtype=np.int32)
    max_depth = 5
    _run_node2vec(src, dst, wgt, seeds, num_vertices, num_edges, max_depth,
                  compress_result, 0.8, 0.5, renumbered)
# =============================================================================
# Tests
# =============================================================================
@pytest.mark.parametrize(*_get_param_args("compress_result", [True, False]))
def test_node2vec(sg_graph_objs, compress_result):
(g, resource_handle, ds_name) = sg_graph_objs
(seeds, expected_paths, expected_weights, expected_path_sizes, max_depth) \
= _test_data[ds_name].values()
p = 0.8
q = 0.5
result = node2vec(resource_handle, g, seeds, max_depth,
compress_result, p, q)
(actual_paths, actual_weights, actual_path_sizes) = result
num_paths = | |
# scripts/slave/recipe_modules/v8/testing.py
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
from recipe_engine.types import freeze
# Test-step configurations keyed by config name. Entries either describe a
# run-tests.py invocation ('name', 'tests', optional 'test_args' and
# 'suite_mapping') or a standalone tool ('tool' + 'isolated_target').
TEST_CONFIGS = freeze({
  'benchmarks': {
    'name': 'Benchmarks',
    'tests': ['benchmarks'],
    'test_args': ['--download-data'],
  },
  'deopt': {
    'tool': 'run-deopt-fuzzer',
    'isolated_target': 'run-deopt-fuzzer',
  },
  'jsfunfuzz': {
    'tool': 'jsfunfuzz',
    'isolated_target': 'jsfunfuzz',
  },
  'gcmole': {
    'tool': 'run-gcmole',
    'isolated_target': 'run-gcmole',
  },
  'ignition': {
    'name': 'Ignition',
    'tests': ['ignition'],
    'test_args': ['--variants=ignition', '--ignition'],
  },
  'mjsunit': {
    'name': 'Mjsunit',
    'tests': ['mjsunit'],
  },
  'mjsunit_sp_frame_access': {
    'name': 'Mjsunit - sp frame access',
    'tests': ['mjsunit'],
    'test_args': [
      '--variants=turbofan', '--extra-flags=--turbo_sp_frame_access'],
  },
  'mozilla': {
    'name': 'Mozilla',
    'tests': ['mozilla'],
  },
  'optimize_for_size': {
    'name': 'OptimizeForSize',
    'tests': ['optimize_for_size'],
    'suite_mapping': ['mjsunit', 'cctest', 'webkit', 'intl'],
    'test_args': ['--no-variants', '--extra-flags=--optimize-for-size'],
  },
  'simdjs': {
    'name': 'SimdJs - all',
    'tests': ['simdjs'],
    'test_args': ['--download-data'],
  },
  'simpleleak': {
    'tool': 'run-valgrind',
    'isolated_target': 'run-valgrind',
  },
  'test262': {
    'name': 'Test262 - no variants',
    'tests': ['test262'],
    'test_args': ['--no-variants', '--download-data'],
  },
  'test262_ignition': {
    'name': 'Test262 - ignition',
    'tests': ['test262'],
    'test_args': ['--variants=ignition', '--ignition'],
  },
  'test262_variants': {
    'name': 'Test262',
    'tests': ['test262'],
    'test_args': ['--download-data'],
  },
  'unittests': {
    'name': 'Unittests',
    'tests': ['unittests'],
  },
  'v8initializers': {
    'tool': 'check-static-initializers',
    'isolated_target': 'check-static-initializers',
  },
  'v8testing': {
    'name': 'Check',
    'tests': ['bot_default'],
    'suite_mapping': [
      'mjsunit', 'cctest', 'webkit', 'message', 'preparser', 'intl'],
  },
  'webkit': {
    'name': 'Webkit',
    'tests': ['webkit'],
  },
})
class BaseTest(object):
  """Abstract interface for one configured test step.

  Subclasses must implement run() and rerun(); apply_filter(), pre_run() and
  uses_swarming have permissive defaults.
  """
  def __init__(self, test_step_config, api, v8):
    self.test_step_config = test_step_config
    # Step name taken from the step config; also the TEST_CONFIGS key.
    self.name = test_step_config.name
    self.api = api
    self.v8 = v8
  def _get_isolated_hash(self, test):
    """Return the isolated hash for a test config, asserting it is known."""
    isolated = test.get('isolated_target')
    if not isolated:
      # Normally we run only one test and the isolate name is the same as the
      # test name.
      assert len(test['tests']) == 1
      isolated = test['tests'][0]
    isolated_hash = self.v8.isolated_tests.get(isolated)
    # TODO(machenbach): Maybe this is too hard. Implement a more forgiving
    # solution.
    assert isolated_hash
    return isolated_hash
  @property
  def uses_swarming(self):
    """Returns true if the test uses swarming."""
    return False
  def apply_filter(self):
    # Run all tests by default.
    return True
  def pre_run(self, test=None, **kwargs):  # pragma: no cover
    # Hook executed before run(); no-op by default.
    pass
  def run(self, test=None, **kwargs):  # pragma: no cover
    raise NotImplementedError()
  def rerun(self, failure_dict, **kwargs):  # pragma: no cover
    raise NotImplementedError()
class V8Test(BaseTest):
  def apply_filter(self):
    """Return False (emitting a 'skipped' step) when the user-supplied test
    filter leaves nothing to run for this config; True otherwise."""
    self.applied_test_filter = self.v8._applied_test_filter(
        TEST_CONFIGS[self.name])
    if self.v8.test_filter and not self.applied_test_filter:
      self.api.step(TEST_CONFIGS[self.name]['name'] + ' - skipped', cmd=None)
      return False
    return True
  def run(self, test=None, **kwargs):
    """Run the configured test locally via tools/run-tests.py.

    Builds the runner command line (including sharding and JSON output),
    executes it as a python step, and delegates result handling to post_run.
    """
    test = test or TEST_CONFIGS[self.name]

    def step_test_data():
      # Simulated runner output for recipe testing.
      return self.v8.test_api.output_json(
          self.v8._test_data.get('test_failures', False),
          self.v8._test_data.get('wrong_results', False),
          self.v8._test_data.get('flakes', False))

    full_args, env = self.v8._setup_test_runner(test, self.applied_test_filter)
    if self.v8.c.testing.may_shard and self.v8.c.testing.SHARD_COUNT > 1:
      full_args += [
        '--shard-count=%d' % self.v8.c.testing.SHARD_COUNT,
        '--shard-run=%d' % self.v8.c.testing.SHARD_RUN,
      ]
    # Emit machine-readable results without attaching the raw JSON log.
    full_args += [
      '--json-test-results',
      self.api.json.output(add_json_log=False),
    ]
    self.api.python(
      test['name'],
      self.api.path['checkout'].join('tools', 'run-tests.py'),
      full_args,
      cwd=self.api.path['checkout'],
      env=env,
      step_test_data=step_test_data,
      **kwargs
    )
    return self.post_run(test)
def post_run(self, test):
# The active step was either a local test run or the swarming collect step.
step_result = self.api.step.active_result
json_output = step_result.json.output
# Log used test filters.
if self.applied_test_filter:
step_result.presentation.logs['test filter'] = self.applied_test_filter
# The output is expected to be a list of architecture dicts that
# each contain a results list. On buildbot, there is only one
# architecture.
assert len(json_output) == 1
self.v8._update_durations(json_output[0], step_result.presentation)
failure_factory=Failure.factory_func(self.test_step_config)
failure_log, failures, flake_log, flakes = (
self.v8._get_failure_logs(json_output[0], failure_factory))
self.v8._update_failure_presentation(
failure_log, failures, step_result.presentation)
if failure_log and failures:
# Mark the test step as failure only if there were real failures (i.e.
# non-flakes) present.
step_result.presentation.status = self.api.step.FAILURE
if flake_log and flakes:
# Emit a separate step to show flakes from the previous step
# to not close the tree.
step_result = self.api.step(test['name'] + ' (flakes)', cmd=None)
step_result.presentation.status = self.api.step.WARNING
self.v8._update_failure_presentation(
flake_log, flakes, step_result.presentation)
return TestResults(failures, flakes, [])
def _setup_rerun_config(self, failure_dict):
"""Return: A test config that reproduces a specific failure."""
# Make sure bisection is only activated on builders that give enough
# information to retry.
assert failure_dict.get('variant')
assert failure_dict.get('random_seed')
orig_config = TEST_CONFIGS[self.name]
# If not specified, the isolated target is the same as the first test of
# the original list. We need to set it explicitly now, as the tests
# parameter changes on rerun, but the isolated target is still the same.
isolated_target = orig_config.get(
'isolated_target', orig_config['tests'][0])
# Filter variant manipulation from test arguments.
# We'll specify exactly the variant which failed.
orig_args = [x for x in orig_config.get('test_args', [])
if x != '--no-variants']
new_args = [
'--variants', failure_dict['variant'],
'--random-seed', failure_dict['random_seed'],
]
rerun_config = {
'name': 'Retry',
'isolated_target': isolated_target,
'tests': [failure_dict['name']],
'test_args' : orig_args + new_args,
}
# Switch off test filters on rerun.
self.applied_test_filter = None
return rerun_config
def rerun(self, failure_dict, **kwargs):
return self.run(test=self._setup_rerun_config(failure_dict), **kwargs)
class V8SwarmingTest(V8Test):
  """Runs a V8 test on swarming: triggers in pre_run(), collects in run()."""

  @property
  def uses_swarming(self):
    """Returns true if the test uses swarming."""
    return True

  def _v8_collect_step(self, task, **kwargs):
    """Produces a step that collects and processes a result of a v8 task."""
    # Placeholder for the merged json output.
    json_output = self.api.json.output(add_json_log=False)
    # Shim script's own arguments.
    args = [
      '--swarming-client-dir', self.api.swarming_client.path,
      '--temp-root-dir', self.api.path['tmp_base'],
      '--merged-test-output', json_output,
    ]
    # Arguments for actual 'collect' command.
    args.append('--')
    args.extend(self.api.swarming.get_collect_cmd_args(task))
    # We need to wait longer for tasks on arm as there the hard
    # timeout and expiration are also higher.
    if (self.task.dimensions.get('cpu') and
        self.task.dimensions['cpu'].startswith('arm')):
      args.extend(['--timeout', '%d' % (7 * 60 * 60)])
    return self.api.python(
      name=self.test['name'],
      script=self.v8.resource('collect_v8_task.py'),
      args=args,
      allow_subannotations=True,
      step_test_data=kwargs.pop('step_test_data', None),
      **kwargs)

  def pre_run(self, test=None, **kwargs):
    """Triggers the swarming task; results are collected later in run()."""
    # Set up arguments for test runner.
    self.test = test or TEST_CONFIGS[self.name]
    extra_args, _ = self.v8._setup_test_runner(
        self.test, self.applied_test_filter)
    # Let json results be stored in swarming's output folder. The collect
    # step will copy the folder's contents back to the client.
    extra_args += [
      '--swarming',
      '--json-test-results',
      '${ISOLATED_OUTDIR}/output.json',
    ]
    # Initialize number of shards, either per test or per builder.
    shards = 1
    if self.v8.c.testing.may_shard:
      shards = self.test_step_config.shards
      if self.v8.c.testing.SHARD_COUNT > 1: # pragma: no cover
        shards = self.v8.c.testing.SHARD_COUNT
    # Initialize swarming task with custom data-collection step for v8
    # test-runner output.
    self.task = self.api.swarming.task(
      title=self.test['name'],
      isolated_hash=self._get_isolated_hash(self.test),
      shards=shards,
      extra_args=extra_args,
    )
    self.task.collect_step = lambda task, **kw: (
        self._v8_collect_step(task, **kw))
    # Add custom dimensions.
    if self.v8.bot_config.get('swarming_dimensions'):
      self.task.dimensions.update(self.v8.bot_config['swarming_dimensions'])
    # Set default value.
    if 'os' not in self.task.dimensions: # pragma: no cover
      # TODO(machenbach): Remove pragma as soon as there's a builder without
      # default value.
      self.task.dimensions['os'] = self.api.swarming.prefered_os_dimension(
        self.api.platform.name)
    # Increase default timeout and expiration on arm.
    if (self.task.dimensions.get('cpu') and
        self.task.dimensions['cpu'].startswith('arm')):
      self.task.hard_timeout = 60 * 60
      self.task.expiration = 6 * 60 * 60
    self.api.swarming.trigger_task(self.task)

  def run(self, **kwargs):
    """Collects the previously triggered swarming task and processes results."""
    # TODO(machenbach): Soften this when softening 'assert isolated_hash'
    # above.
    assert self.task
    try:
      # Collect swarming results. Use the same test simulation data for the
      # swarming collect step like for local testing.
      # NOTE(review): `result` is never used, and the `return` in the
      # `finally` clause below swallows any exception raised by
      # collect_task (including StepFailure) — presumably deliberate so
      # post_run always executes, but confirm.
      result = self.api.swarming.collect_task(
        self.task,
        step_test_data=lambda: self.v8.test_api.output_json(),
      )
    finally:
      # Note: Exceptions from post_run might hide a pending exception from the
      # try block.
      return self.post_run(self.test)

  def rerun(self, failure_dict, **kwargs):
    """Triggers and collects a retry task for one specific failure."""
    self.pre_run(test=self._setup_rerun_config(failure_dict), **kwargs)
    return self.run(**kwargs)
class V8Presubmit(BaseTest):
  """Runs tools/presubmit.py checks locally; always reports empty results."""
  def run(self, **kwargs):
    self.api.python(
      'Presubmit',
      self.api.path['checkout'].join('tools', 'presubmit.py'),
      cwd=self.api.path['checkout'],
    )
    # A presubmit failure surfaces as a failing step, not as test failures.
    return TestResults.empty()
class V8GenericSwarmingTest(BaseTest):
  """Runs an arbitrary isolated target on swarming without result processing."""
  def __init__(self, test_step_config, api, v8,
               title='Generic test', extra_args=None):
    super(V8GenericSwarmingTest, self).__init__(test_step_config, api, v8)
    self._extra_args = extra_args or []
    self._title = title
  @property
  def title(self):
    """Task/step title; subclasses may override."""
    return self._title # pragma: no cover
  @property
  def extra_args(self):
    """Extra command-line arguments for the isolate; subclasses may override."""
    return self._extra_args # pragma: no cover
  @property
  def task_output_dir(self):
    """Directory collecting task output files, or None; subclasses may override."""
    return None # pragma: no cover
  @property
  def uses_swarming(self):
    """Returns true if the test uses swarming."""
    return True
  def pre_run(self, test=None, **kwargs):
    """Triggers the swarming task; run() collects it later."""
    self.test = test or TEST_CONFIGS[self.name]
    self.task = self.api.swarming.task(
      title=self.title,
      isolated_hash=self._get_isolated_hash(self.test),
      extra_args=self.extra_args,
      task_output_dir=self.task_output_dir,
    )
    # Set default value.
    if 'os' not in self.task.dimensions:
      self.task.dimensions['os'] = self.api.swarming.prefered_os_dimension(
        self.api.platform.name)
    self.api.swarming.trigger_task(self.task)
  def run(self, **kwargs):
    """Collects the triggered task; failures surface as a failing step."""
    assert self.task
    self.api.swarming.collect_task(self.task)
    return TestResults.empty()
class V8CompositeSwarmingTest(BaseTest):
  """Aggregates several generic swarming tests into one logical test step."""
  @property
  def composite_tests(self):
    """Returns: An iterable of V8GenericSwarmingTest instances."""
    raise NotImplementedError() # pragma: no cover
  @property
  def uses_swarming(self):
    """Returns true if the test uses swarming."""
    return True
  def pre_run(self, test=None, **kwargs):
    # Trigger all sub-tests up front so their tasks run concurrently.
    self.composites = list(self.composite_tests)
    for c in self.composites:
      c.pre_run(test, **kwargs)
  def run(self, **kwargs):
    # Collect sub-tests in trigger order.
    for c in self.composites:
      c.run(**kwargs)
    return TestResults.empty()
class V8CheckInitializers(V8GenericSwarmingTest):
  """Swarming test for the 'Static-Initializers' check, run against d8."""
  @property
  def title(self):
    return 'Static-Initializers'
  @property
  def extra_args(self):
    # The isolate is invoked with the path to the d8 binary to inspect.
    return [self.v8.relative_path_to_d8]
class V8Fuzzer(V8GenericSwarmingTest):
def __init__(self, test_step_config, api, v8,
title='Generic test', extra_args=None):
self.output_dir = api.path.mkdtemp('swarming_output')
self.archive = 'fuzz-results-%s.tar.bz2' % (
api.properties['parent_got_revision'])
super(V8Fuzzer, self).__init__(
test_step_config, api, v8,
title='Fuzz',
extra_args=[
v8.relative_path_to_d8,
'${ISOLATED_OUTDIR}/%s' % self.archive,
],
)
@property
def task_output_dir(self):
return self.output_dir
def run(self, **kwargs):
try:
super(V8Fuzzer, self).run(**kwargs)
except self.api.step.StepFailure as e:
self.api.gsutil.upload(
self.output_dir.join('0', self.archive),
'chromium-v8',
self.api.path.join('fuzzer-archives', self.archive),
| |
label = 'Tumor-Normal-{}'.format(tissue)
else:
n = gtex[gtex.tissue == tissue][gene].median()
label = 'Tumor-GTEx-{}'.format(tissue)
# Calculate l2fc for each tumor sample and save
l2fcs = []
for i, row in tumor[tumor.tissue == tissue].iterrows():
l2fcs.append(log2fc(row[gene], n))
# Create distribution
dists.append(hv.Distribution(l2fcs, kdims=[xdim], label=label))
return hv.Overlay(dists, label='{} Expression'.format(gene)).opts(self._gene_kde_opts)
def l2fc_by_perc_samples(self, gene, tissue_subset=None, tcga_normal=False, l2fc_cutoff=2):
"""
Calculate the percentage of samples greater than a range of log2 fold change values
:param str gene: Gene (ex: ERBB2) to select
:param list tissue_subset: List of tissues to subset by
:param bool tcga_normal: If True, use TCGA normal to for DE calc, otherwise use GTEx
:param float l2fc_cutoff: Specifies the L2FC cutoff to draw a Spike object
:return: Collection of Curve objects
:rtype: hv.Overlay
"""
# Subset dataframe by gene and tissue subset
df = self._subset([gene], tissue_subset)
# Subset by dataset
tumor, normal, gtex = subset_by_dataset(df)
# Create X dimension
xdim = hv.Dimension('Log2 Fold Change', unit='log2(a+1)/log2(b+1)')
ydim = hv.Dimension('Tumor Samples With Greater L2FC', unit='%')
# Calculate % samples over a given l2fc
curves = []
label = ''
for tissue in sorted(df.tissue.unique()):
# Calculate mean expression for normal
if tcga_normal:
n = normal[normal.tissue == tissue][gene].median()
label = 'Tumor-Normal'
else:
n = gtex[gtex.tissue == tissue][gene].median()
label = 'Tumor-GTEx'
# Calculate l2fc for each tumor sample and save
l2fcs = []
for i, row in tumor[tumor.tissue == tissue].iterrows():
l2fcs.append(log2fc(row[gene], n))
# Calculate percentage samples over l2fc
percentages = {}
l2fc_range = [x * 0.1 for x in xrange(0, int(np.ceil(max(l2fcs) * 10)))]
for l2fc in l2fc_range:
percentages[l2fc] = len([x for x in l2fcs if x >= l2fc]) / len(l2fcs) * 100
# Create line object
curves.append(hv.Area(percentages, kdims=[xdim], vdims=[ydim], label=tissue))
# Return curves along with a Spikes object at the l2fc cutoff
overlay = hv.Overlay(curves + [hv.Spikes([l2fc_cutoff])], label='{} {} Expression'.format(label, gene))
return overlay.opts(self._l2fc_by_perc_samples_opts)
def gene_de_heatmap(self, genes, tissue_subset=None, tcga_normal=False):
"""
Heatmap of gene log2 fold change
:param list(str) genes: Gene (ex: ERBB2) to select
:param list tissue_subset: List of tissues to subset by
:param bool tcga_normal: If True, use TCGA normal to for DE calc, otherwise use GTEx
:return: DE Heatmap of genes for tissue subset
:rtype: hv.HeatMap
"""
# Subset dataframe by genes
df = self.df[self.df_cols + genes]
# Subset by tissues
if tissue_subset:
df = df[df.tissue.isin(tissue_subset)]
# Subset by dataset
tumor, normal, gtex = subset_by_dataset(df)
# For each tissue/gene, calculate L2FC
records = []
for tissue in sorted(df.tissue.unique()):
for gene in genes:
# Calculate mean expression for normal
if tcga_normal:
n = normal[normal.tissue == tissue][gene].median()
else:
n = gtex[gtex.tissue == tissue][gene].median()
# Calculate expression for tumor and compute l2fc
t = tumor[tumor.tissue == tissue][gene].median()
l2fc = log2fc(t, n)
records.append([tissue, gene, l2fc])
# Create dataframe and define dimensions
df = pd.DataFrame.from_records(records, columns=['Tissue', 'Gene', 'L2FC']).sort_values('Tissue')
return hv.HeatMap(df, kdims=['Gene', 'Tissue'], vdims=['L2FC']).opts(self._gene_de_heatmap_opts)
    def tissue_top_de_genes(self, tissue):
        """
        HoloMap of scatter plots comparing GTEx-based and TCGA-normal-based
        L2FC values for the top differentially expressed genes, at several
        top-N cutoffs

        :param str tissue: Tissue (ex: Breast) to select
        :return: HoloMap keyed by the number of top genes considered
        :rtype: hv.HoloMap
        """
        # Create DE objects to get data
        gtex = self.tissue_de(tissue).data
        tcga = self.tissue_de(tissue, tcga_normal=True).data
        intervals = [10, 100, 500, 1000, 5000, 10000, len(self.genes)]
        # Calculate maximum arange for plot
        reg_line_arange = gtex[gtex.exp > gtex.exp.median()].sort_values('l2fc', ascending=False).l2fc.tolist()
        # Top DE genes with high expression
        hmaps = {}
        for i in intervals:
            # Genes above median expression, ranked by l2fc, truncated to top i
            x = gtex[gtex.exp > gtex.exp.median()].sort_values('l2fc', ascending=False).l2fc.tolist()[:i]
            y = tcga[tcga.exp > tcga.exp.median()].sort_values('l2fc', ascending=False).l2fc.tolist()[:i]
            scatter = hv.Scatter((x, y), kdims=['GTEx L2FC'], vdims=['TCGA L2FC'])
            reg_line = hv.Curve(self.regression_line(x, y, arange=reg_line_arange))
            # Label each frame with the Pearson correlation of the two rankings
            pearson_r = round(pearsonr(x, y)[0], 2)
            title = 'R: {}'.format(pearson_r)
            hmaps[i] = hv.Overlay([scatter, reg_line]).relabel(title)
        return hv.HoloMap(hmaps, kdims='Num_Genes').relabel('Top DE Gene L2FC in {}'.format(tissue))
# Misc plots
def dist_with_iqr_bounds(self, ys, kdim):
"""
Creates distribution object with IQR bounds
:param list ys: List of values to calculate IQR and bounds
:param str kdim: K-dimension label for distribution
:return: Distribution with IQR bounds
:rtype: hv.Overlay
"""
# Calculate IQR and outlier bounds
q25, q75 = np.percentile(ys, [25, 75])
upper, lower = iqr_bounds(ys)
# Return dist with spikes
return hv.Overlay([hv.Distribution(ys, kdims=[kdim]),
hv.Spikes([q25, q75]),
hv.Spikes([lower, upper])]).opts(self._dist_with_iqr_bounds_opts)
@staticmethod
def regression_line(x, y, arange=None):
"""
Returns x/y vectors of a regression line for 2D input
:param np.array x: Vector of x values
:param np.array y: Vector of y values
:param np.array arange: Provide a custom arange to generate regression line
:return: Regression line vectors
:rtype: tuple(np.array, np.array)
"""
m, b = np.polyfit(x, y, 1)
reg_x = np.arange(min(arange), max(arange)) if arange else np.arange(min(x), max(x))
return reg_x, m * reg_x + b
@staticmethod
def path_box(xmin, xmax, ymin, ymax, color=None):
"""
Returns rectangular Path object for a given set of x/y coordinates
:param float xmin: xmin of box
:param float xmax: xmax of box
:param float ymin: ymin of box
:param float ymax: ymax of box
:param str color: Set the color of the Path object
:return: Rectangular path object
:rtype: hv.Path
"""
path = [(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax), (xmin, ymin)]
if color:
return hv.Path([path]).opts(dict(Path=dict(style=dict(color=color))))
else:
return hv.Path([path])
def highlight_points(self, xs, ys, size=0.1, color=None, hidden_buffer_box=True):
"""
Returns a rectangular Path object for a set of points
:param list|float xs: List of x coordinates or a single x coord
:param list|float ys: List of y coordinates or a single y coord
:param float size: Margin around xmin,xmax,ymin,ymax of points
:param str color: Set the color of the Path object
:param bool hidden_buffer_box: Adds a transparent larger frame around the Path object to improve plot margins
:return: Rectangular Path object
:rtype: hv.Path
"""
# If a set of single points
if isinstance(xs, (int, float)) and isinstance(ys, (int, float)):
xs, ys = [xs], [ys]
# Collect mins nad maxes from all points
xmin, xmax, ymin, ymax = min(xs), max(xs), min(ys), max(ys)
# Add margins
xmin, xmax, ymin, ymax = xmin - size, xmax + size, ymin - size, ymax + size
# Create Path object
plot = self.path_box(xmin, xmax, ymin, ymax, color=color)
# If hidden_buffer_box is enabled
if hidden_buffer_box:
xmin, xmax, ymin, ymax = xmin - size, xmax + size, ymin - size, ymax + size
hbb = self.path_box(xmin, xmax, ymin, ymax).opts(dict(Path=dict(style=dict(alpha=0))))
return plot * hbb
else:
return plot
    def gene_curves(self, gene, tissue):
        """
        Returns set of 3 plots for tissue / gene given a dataframe of metadata and expression values

        :param str gene: Gene (ex: ERBB2) to select
        :param str tissue: Tissue (ex: Breast) to select
        :return: Returns holoviews Layout object containing 3 plots for selected Tissue / Gene
        :rtype: hv.Layout
        """
        # Subset dataframe for gene and tissue
        df = self._subset([gene], [tissue])
        # Logscale gene for calculations
        df[gene] = df[gene].apply(l2norm)
        # Subset by dataset
        tumor, normal, gtex = subset_by_dataset(df)
        # Get values for plot, sweeping the top 10%, 20%, ..., 100% of tumor
        # samples
        records = []
        for perc_tumor in [x * 0.1 for x in xrange(1, 11)]:
            # Get log2 expression value for top x% tumor samples
            exp = float(tumor.iloc[int(len(tumor) * perc_tumor) - 1][gene])
            # Get percentage of samples in GTEx above that expression level
            perc_normal = (len(gtex[gtex[gene] > exp]) * 1.0) / len(gtex)
            # Compute L2FC for tumor sample subset vs GTEx (undo the log2(x+1)
            # transform before taking medians)
            tumor_mean = tumor.iloc[:int(len(tumor) * perc_tumor) - 1][gene].apply(lambda x: 2 ** x - 1).median()
            gtex_mean = gtex[gene].apply(lambda x: 2 ** x - 1).median()
            l2fc = log2fc(tumor_mean, gtex_mean)
            # Store
            records.append((tissue, exp, l2fc, perc_tumor, perc_normal, len(gtex), len(tumor), 'GTEx'))
        # Create dataframe from records
        info = pd.DataFrame.from_records(records, columns=['tissue', 'expression',
                                                           'l2fc',
                                                           'percent_tumor',
                                                           'percent_normal',
                                                           'num_normals', 'num_tumors',
                                                           'normal_dataset'])
        # Define dimensions
        tissue_dim = hv.Dimension('tissue', label='Tissue')
        ptumor_dim = hv.Dimension('percent_tumor', label='% Tumor')
        pnormal_dim = hv.Dimension('percent_normal', label='percent')
        l2fc_dim = hv.Dimension('l2fc', label='log2FC')
        exp_dim = hv.Dimension('expression', label='log2(x+1)')
        # First plot - Percentage of Normal Samples
        c1 = hv.Curve(data=info, kdims=[ptumor_dim],
                      vdims=[pnormal_dim, tissue_dim], group='Percentage of Normal Samples',
                      extents=(None, 0, None, 1))
        s1 = hv.Scatter(data=info, kdims=[ptumor_dim],
                        vdims=[pnormal_dim, tissue_dim], group='Percentage of Normal Samples')
        # Second Plot - Expression
        c2 = hv.Curve(data=info, kdims=[ptumor_dim],
                      vdims=[exp_dim, tissue_dim], group='Gene Expression',
                      extents=(None, 0, None, 16))
        s2 = hv.Scatter(data=info, kdims=[ptumor_dim],
                        vdims=[exp_dim, tissue_dim], group='Gene Expression')
        # Third Plot - Log2 Fold Change
        c3 = hv.Curve(data=info, kdims=[ptumor_dim],
                      vdims=[l2fc_dim, tissue_dim], group='Log2 Fold Change',
                      extents=(None, -0.5, None, 8))
        s3 = hv.Scatter(data=info, kdims=[ptumor_dim],
                        vdims=[l2fc_dim, tissue_dim], group='Log2 Fold Change')
        return (c1 * s1 + c2 * s2 + c3 * s3).cols(1)
def sample_counts(self, tissue_subset=None, groupby='tissue'):
"""
Bargraph of tissues grouped by dataset
:return: Bargraph | |
"""
Modular ball chaser robot.
To play yourself, type:
python modular_ball_chaser.py
Created by <NAME>. Licensed on the same terms as the rest of OpenAI Gym.
Modified by <NAME>.
"""
import numpy as np
import Box2D
from Box2D.b2 import fixtureDef
from Box2D.b2 import circleShape
from Box2D.b2 import polygonShape
from Box2D.b2 import contactListener
import gym
from gym import spaces
from modular_carrier_dynamics import ModularRobot
from gym.utils import seeding, EzPickle
import pyglet
pyglet.options["debug_gl"] = False
from pyglet import gl
STATE_W = 96 # less than Atari 160x192
STATE_H = 96
VIDEO_W = 600
VIDEO_H = 600
WINDOW_W = 600
WINDOW_H = 600
FPS = 50 # Frames per second
WALL_COLOR = [0.4, 0.4, 0.4] # RGB in [0, 1]
BALL_SIZE = 1 # Ball radius (world units)
BALL_COLOR = [0.5, 0.0, 0.0] # RGB in [0, 1]
GOAL_SIZE = 5 # Half-extent of the square goal zone (world units)
GOAL_COLOR = [0.1, 0.8, 0.1] # RGB in [0, 1]
def calculatePlayfield(max_grid_size):
    """Return the half-extent of the square playfield for a robot grid size.

    Scales linearly with the grid (5 units per cell plus a 35-unit margin)
    but never shrinks below 50.
    """
    return max(50, 5 * max_grid_size + 35)
class CollisionDetector(contactListener):
    """Box2D contact listener that flags contacts involving tagged bodies.

    Bodies of interest carry a reference to themselves in userData (see
    Ball and Goal); any contact where either fixture's body has userData
    set marks the environment's ball_touch flag.
    """
    def __init__(self, env):
        contactListener.__init__(self)
        # Environment whose ball_touch flag is raised on contact.
        self.env = env
    def BeginContact(self, contact):
        u1 = contact.fixtureA.body.userData
        u2 = contact.fixtureB.body.userData
        if u1 or u2: # Ball collision
            self.env.ball_touch = True
class Ball:
    """Dynamic circular ball body spawned at one of several preset positions."""
    def __init__(self, world, predefined_ball_pos, predefined_ball_pos_noise,
                 ball_idx=None):
        self.world = world
        # Pick a random preset slot when none is requested.
        # NOTE(review): uses the global np.random, not a seeded env RNG, so
        # seeding the environment does not make placement reproducible —
        # confirm intent.
        if ball_idx is None:
            ball_idx = np.random.randint(len(predefined_ball_pos))
        self.position = predefined_ball_pos[ball_idx % len(predefined_ball_pos)]
        # Jitter the preset position uniformly in both axes.
        self.position += np.random.uniform(-predefined_ball_pos_noise,
                                           predefined_ball_pos_noise, size=2)
        self.ball_body = world.CreateDynamicBody(
            position=self.position,
            angle=0,
            fixtures=[
                fixtureDef(
                    shape=circleShape(radius=BALL_SIZE, pos=(0,0)),
                    density=0.1,
                ),
            ],
        )
        self.ball_body.color = BALL_COLOR
        # userData marks this body for CollisionDetector.BeginContact.
        self.ball_body.userData = self.ball_body
    def step(self, dt):
        """Apply a damping force opposing the ball's current velocity.

        Note: dt is unused; the force magnitude is independent of it.
        """
        # Force
        forw = self.ball_body.GetWorldVector((0, 1))
        side = self.ball_body.GetWorldVector((1, 0))
        v = self.ball_body.linearVelocity
        vf = forw[0] * v[0] + forw[1] * v[1] # forward speed
        vs = side[0] * v[0] + side[1] * v[1] # side speed
        # The combined force equals -v (velocity decomposed into the body's
        # forward/side axes and negated), acting as friction.
        self.ball_body.ApplyForceToCenter(
            (
                -vf * forw[0] -vs * side[0],
                -vf * forw[1] -vs * side[1],
            ),
            True,
        )
    def draw(self, viewer):
        """Draw the ball's circle fixture into the given viewer."""
        from gym.envs.classic_control import rendering
        for f in self.ball_body.fixtures:
            trans = f.body.transform
            t = rendering.Transform(translation=trans * f.shape.pos)
            viewer.draw_circle(
                f.shape.radius, 20, color=self.ball_body.color
            ).add_attr(t)
    def destroy(self):
        """Remove the ball's body from the Box2D world."""
        self.world.DestroyBody(self.ball_body)
        self.ball_body = None
class Goal:
    """Static square goal zone; maskBits=0 makes it non-colliding (visual marker)."""
    def __init__(self, world, predefined_goal_pos, goal_idx=None):
        self.world = world
        # Pick a random preset slot when none is requested (global np.random).
        if goal_idx is None:
            goal_idx = np.random.randint(len(predefined_goal_pos))
        self.position = predefined_goal_pos[goal_idx % len(predefined_goal_pos)]
        # Axis-aligned square of half-extent GOAL_SIZE around the origin.
        GOAL_POLY = [
            (-GOAL_SIZE, +GOAL_SIZE),
            (+GOAL_SIZE, +GOAL_SIZE),
            (+GOAL_SIZE, -GOAL_SIZE),
            (-GOAL_SIZE, -GOAL_SIZE),
        ]
        self.goal_body = world.CreateStaticBody(
            position=self.position,
            angle=0,
            fixtures=[
                fixtureDef(
                    shape=polygonShape(
                        vertices=[(mx,my) for mx, my in GOAL_POLY]
                    ),
                    # maskBits=0: collides with nothing — purely a marker.
                    maskBits=0x000,
                ),
            ],
        )
        self.goal_body.color = GOAL_COLOR
        self.goal_body.userData = self.goal_body
    def draw(self, viewer):
        """Draw the goal polygon into the given viewer."""
        for f in self.goal_body.fixtures:
            trans = f.body.transform
            path = [trans * v for v in f.shape.vertices]
            viewer.draw_polygon(path, color=self.goal_body.color)
    def destroy(self):
        """Remove the goal's body from the Box2D world."""
        self.world.DestroyBody(self.goal_body)
        self.goal_body = None
class ModularCarrier(gym.Env, EzPickle):
    """Gym environment in which a modular robot pushes a ball toward a goal.

    The robot morphology is given by ``body_grid``, an array of module type
    codes per cell: 0 = empty, 2 = sensor type 1 (senses the ball),
    3 = sensor type 2 (senses the goal), 4/5 = actuators; any other nonzero
    value is a passive body module.

    Observations: one normalized inverse-distance reading in [0, 1] per
    sensor. Actions: one value in [-1, 1] per actuator.
    """

    metadata = {
        "render.modes": ["human", "rgb_array", "state_pixels"],
        "video.frames_per_second": FPS,
    }

    def __init__(self, body_grid, done_in_ball_touch,
                 predefined_ball_idx=None, zoom_manual=None,
                 manual_max_body_size=None):
        """Build the world, spaces and playfield geometry from the body grid.

        Args:
            body_grid: 2D numpy array of module type codes (see class doc).
            done_in_ball_touch: stored episode-termination policy flag.
                NOTE(review): not read anywhere in this class as written —
                confirm whether step() should honor it.
            predefined_ball_idx: fixed ball spawn slot, or None for random.
            zoom_manual: manual render zoom factor; when set, reset() also
                uses a deterministic debug layout.
            manual_max_body_size: override for the grid size used to scale
                the playfield.
        """
        EzPickle.__init__(self)
        self.seed()
        self.contactListener_keepref = CollisionDetector(self)
        self.world = Box2D.b2World((0, 0), contactListener=self.contactListener_keepref)
        self.viewer = None
        self.robot = None
        self.ball = None
        self.goal = None
        self.distance_robot_ball = None
        self.reward = 0.0
        self.ball_touch = False
        self.done_in_ball_touch = done_in_ball_touch
        self.predefined_ball_idx = predefined_ball_idx
        self.robot_failure = False
        self.zoom_manual = zoom_manual
        self.body_grid = body_grid
        # Count modules by type code (see class docstring).
        self.module_number = np.sum(self.body_grid != 0)
        self.sensor1_number = np.sum(body_grid == 2)
        self.sensor2_number = np.sum(body_grid == 3)
        self.sensor_number = self.sensor1_number + self.sensor2_number
        # One [0, 1] proximity reading per sensor.
        self.observation_space = spaces.Box(
            np.array(self.sensor_number*[0]).astype(np.float32),
            np.array(self.sensor_number*[+1]).astype(np.float32),
        )
        self.observations = np.zeros(self.sensor_number)
        # One [-1, 1] command per actuator module.
        self.actuator_number = np.sum(np.logical_or(body_grid == 4, body_grid == 5))
        self.action_space = spaces.Box(
            np.array(self.actuator_number*[-1]).astype(np.float32),
            np.array(self.actuator_number*[+1]).astype(np.float32),
        )
        self.max_body_size = manual_max_body_size if manual_max_body_size \
            else max(np.asarray(body_grid).shape)
        self.playfield = calculatePlayfield(self.max_body_size)
        self.sensor_sensibility = self.playfield
        # Candidate spawn positions, expressed as fractions of the playfield.
        self.predefined_ball_pos = [[-0.6*self.playfield, 0.0],
                                    [-0.3*self.playfield, 0.0],
                                    [0.3*self.playfield, 0.0],
                                    [0.6*self.playfield, 0.0]]
        self.predefined_goal_pos = [[-0.6*self.playfield, 0.6*self.playfield],
                                    [-0.3*self.playfield, 0.6*self.playfield],
                                    [0.3*self.playfield, 0.6*self.playfield],
                                    [0.6*self.playfield, 0.6*self.playfield]]
        self.predefined_ball_pos_noise = 0.05*self.playfield
        self.predefined_goal_idx = np.random.randint(len(self.predefined_goal_pos))

    def seed(self, seed=None):
        """Seed the env's RNG.

        NOTE(review): spawn randomness uses the global np.random, not this
        np_random instance, so seeding here does not control spawns.
        """
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def _destroy(self):
        """Remove all Box2D bodies created for the previous episode."""
        if self.robot:
            self.robot.destroy()
        if self.ball:
            self.ball.destroy()
        if self.goal:
            self.goal.destroy()

    def getDistanceRobotBall(self):
        """Distance from the robot's center tracker to the ball's center."""
        return np.linalg.norm(self.robot.center_tracker.position - self.ball.ball_body.position)

    def normSensorDistance(self, d):
        """Map a distance d >= 0 into (0, 1]: 1 at d=0, decaying as (d/s + 1)^-2."""
        return np.float_power((d/self.sensor_sensibility) + 1, -2)

    def reset(self):
        """Start a new episode and return the initial observation."""
        self._destroy()
        self.reward = 0.0
        self.t = 0.0
        self.observations = np.zeros(self.sensor_number)
        self.ball_touch = False
        try:
            if self.zoom_manual is None:
                self.robot = ModularRobot(self.world, self.body_grid,
                                          init_x=np.random.uniform(-0.6*self.playfield,
                                                                   0.6*self.playfield),
                                          init_y=-0.85*self.playfield)
                self.ball = Ball(self.world, self.predefined_ball_pos,
                                 self.predefined_ball_pos_noise,
                                 ball_idx=self.predefined_ball_idx)
                self.goal = Goal(self.world, self.predefined_goal_pos)
            else:
                # Deterministic debug layout when manual zoom is requested.
                self.robot = ModularRobot(self.world, self.body_grid,
                                          init_x=0,
                                          init_y=0)
                self.ball = Ball(self.world, self.predefined_ball_pos,
                                 self.predefined_ball_pos_noise,
                                 ball_idx=0)
                self.goal = Goal(self.world, self.predefined_goal_pos)
            self.distance_robot_ball = self.getDistanceRobotBall()
        except Exception:
            # Robot construction can fail for degenerate body grids; flag it
            # so step() yields the construction-failure reward instead of
            # crashing. (This was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            self.robot_failure = True
        return self.step(None)[0]

    def robot_failure_reward(self):
        """Reward for unbuildable robots: 0.01 per sensor (capped at 1) and
        per actuator (capped at 2) present in the grid."""
        return 0.01*(min(self.sensor_number,1)+min(self.actuator_number,2))

    def step(self, action):
        """Advance the simulation by one frame.

        Args:
            action: actuator command vector in [-1, 1], or None for the
                initial no-op step issued by reset().
        Returns:
            (observations, reward, done, info) per the gym API.
        """
        if self.robot_failure:
            done = True
            self.reward = self.robot_failure_reward()
        else:
            if action is not None:
                self.robot.gas(action)
            self.robot.step(1.0 / FPS)
            self.ball.step(1.0 / FPS)
            self.world.Step(1.0 / FPS, 6 * 30, 2 * 30)
            self.t += 1.0 / FPS
            self.distance_robot_ball = self.getDistanceRobotBall()
            # Sensor type 1: normalized proximity to the ball.
            for i in range(self.sensor1_number):
                sensor_position = self.robot.sensors1[i].position
                distance_sensor_ball = np.linalg.norm(
                    sensor_position-self.ball.ball_body.position)
                self.observations[i] = self.normSensorDistance(distance_sensor_ball)
            # Sensor type 2: normalized proximity to the goal.
            for i in range(self.sensor2_number):
                sensor_position = self.robot.sensors2[i].position
                distance_sensor_goal = np.linalg.norm(
                    sensor_position-self.goal.goal_body.position)
                self.observations[i+self.sensor1_number] = self.normSensorDistance(
                    distance_sensor_goal)
            done = False
            if action is not None: # First step without action, called from reset()
                # Shaping reward: half for robot-ball proximity, half for
                # ball-goal proximity.
                self.reward = 0.5*self.normSensorDistance(self.distance_robot_ball) +\
                    0.5*self.normSensorDistance(np.linalg.norm(
                        self.ball.ball_body.position - self.goal.goal_body.position))
        return self.observations, self.reward, done, {}

    def render(self, mode="human"):
        """Render the scene; returns an RGB array for non-human modes."""
        assert mode in ["human", "state_pixels", "rgb_array"]
        if self.viewer is None:
            from gym.envs.classic_control import rendering
            self.viewer = rendering.Viewer(WINDOW_W, WINDOW_H)
            self.transform = rendering.Transform()
        if "t" not in self.__dict__:
            return # reset() not called yet
        zoom = VIDEO_W / self.playfield
        zoom *= 0.5
        if self.zoom_manual:
            zoom *= self.zoom_manual
        self.transform.set_scale(zoom, zoom)
        self.transform.set_translation(
            WINDOW_W / 2,
            WINDOW_H / 2,
        )
        if self.robot and self.ball:
            self.goal.draw(self.viewer)
            self.robot.draw(self.viewer)
            self.ball.draw(self.viewer)
        arr = None
        win = self.viewer.window
        win.switch_to()
        win.dispatch_events()
        win.clear()
        t = self.transform
        if mode == "rgb_array":
            VP_W = VIDEO_W
            VP_H = VIDEO_H
        elif mode == "state_pixels":
            VP_W = STATE_W
            VP_H = STATE_H
        else:
            # Account for HiDPI (Retina) backing scale on macOS.
            pixel_scale = 1
            if hasattr(win.context, "_nscontext"):
                pixel_scale = (
                    win.context._nscontext.view().backingScaleFactor()
                ) # pylint: disable=protected-access
            VP_W = int(pixel_scale * WINDOW_W)
            VP_H = int(pixel_scale * WINDOW_H)
        gl.glViewport(0, 0, VP_W, VP_H)
        t.enable()
        self.render_playfield()
        for geom in self.viewer.onetime_geoms:
            geom.render()
        self.viewer.onetime_geoms = []
        t.disable()
        if mode == "human":
            win.flip()
            return self.viewer.isopen
        image_data = (
            pyglet.image.get_buffer_manager().get_color_buffer().get_image_data()
        )
        # np.frombuffer replaces the deprecated np.fromstring (removed in
        # modern numpy); the resulting array is a read-only view.
        arr = np.frombuffer(image_data.get_data(), dtype=np.uint8)
        arr = arr.reshape(VP_H, VP_W, 4)
        # Flip vertically (GL origin is bottom-left) and drop alpha.
        arr = arr[::-1, :, 0:3]
        return arr

    def close(self):
        """Close the render window if one is open."""
        if self.viewer is not None:
            self.viewer.close()
            self.viewer = None

    def render_wall(self):
        # NOTE(review): self.left_passage_wall / self.right_passage_wall are
        # never assigned anywhere in this class, so calling this raises
        # AttributeError — looks like leftover code from a walled variant.
        for obj in self.left_passage_wall+self.right_passage_wall:
            for f in obj.fixtures:
                trans = f.body.transform
                path = [trans * v for v in f.shape.vertices]
                self.viewer.draw_polygon(path, color=obj.color)

    def render_playfield(self):
        """Draw the white square playfield as a single GL quad."""
        colors = [1, 1, 1, 1.0] * 4
        # Four corners (x, y, z) of the playfield square at z=0.
        polygons_ = [
            +self.playfield,
            +self.playfield,
            0,
            +self.playfield,
            -self.playfield,
            0,
            -self.playfield,
            -self.playfield,
            0,
            -self.playfield,
            +self.playfield,
            0,
        ]
        vl = pyglet.graphics.vertex_list(
            len(polygons_) // 3, ("v3f", polygons_), ("c4f", colors)
        ) # gl.GL_QUADS,
        vl.draw(gl.GL_QUADS)
        vl.delete()
if __name__ == "__main__":
from pyglet.window import key
# import matplotlib.pyplot as plt
# import datetime
# import os
# current_time_str = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
# video_dir = "carrier_" + current_time_str
# os.makedirs(video_dir)
# import utils
# body_grid = np.array([[0,0,0,0,0], [0,3,2,3,0], [0,0,0,0,0]])
# body_grid = np.array([[2,1,2], [4,1,4], [3,3,3]])
body_grid = np.array([[2,1,0,1,2],
[4,3,1,3,4],
[0,1,1,1,0],
[4,1,1,1,4],
[1,1,1,1,1]])
number_actions = np.sum(np.logical_or(body_grid == 4, body_grid == 5))
a = np.array(number_actions*[0.0])
def key_press(k, mod):
global restart
if k == 0xFF0D:
restart = True
if k == key.LEFT:
a[0] = -1.0
# a[2] = -1.0
if k == key.RIGHT:
a[0] = +1.0
# a[2] = +1.0
if k == key.UP:
a[1] = +1.0
# a[3] = +1.0
if k == key.DOWN:
a[1] = -1.0
# a[3] = -1.0
def key_release(k, mod):
if k == key.LEFT:
a[0] = 0
# a[2] = 0
if k == key.RIGHT:
a[0] = 0
# a[2] = 0
if k == key.UP:
a[1] = 0
# a[3] = 0
if k == key.DOWN:
a[1] = 0
# a[3] = 0
env = ModularCarrier(body_grid, False, predefined_ball_idx=3)
env.render()
env.viewer.window.on_key_press = key_press
env.viewer.window.on_key_release = key_release
isopen = True
while isopen:
env.reset()
total_reward = 0.0
steps = 0
restart = False
# with utils.VideoWriter("modular_ball_chaser.mp4", fps=20) as vid:
while True:
# a = [0, 1,0,0] if steps < 4 else [1, 1,0,0]
# a = [1, 1,0,0] if steps < 100 else [0, 0,0,0]
isopen = env.render()
# env_img = env.render(mode="rgb_array")
s, r, done, info = env.step(a)
total_reward += r
if steps % 50 == 0 or done:
print("\naction " + str(["{:+0.2f}".format(x) for x in a]))
print("observation " | |
# -*- coding: utf-8 -*-
"""
eve.flaskapp
~~~~~~~~~~~~
This module implements the central WSGI application object as a Flask
subclass.
:copyright: (c) 2013 by <NAME>.
:license: BSD, see LICENSE for more details.
"""
import eve
import sys
import os
from flask import Flask
from werkzeug.routing import BaseConverter
from werkzeug.serving import WSGIRequestHandler
from eve.io.mongo import Mongo, Validator
from eve.exceptions import ConfigException, SchemaException
from eve.endpoints import collections_endpoint, item_endpoint, home_endpoint
from eve.utils import api_prefix, extract_key_values
from events import Events
class EveWSGIRequestHandler(WSGIRequestHandler):
    """ Extend werkzeug request handler to include current Eve version in all
    responses, which is super-handy for debugging.
    """
    @property
    def server_version(self):
        # Prefix the stock werkzeug server identifier with the Eve version.
        base_version = super(EveWSGIRequestHandler, self).server_version
        return 'Eve/%s ' % eve.__version__ + base_version
class RegexConverter(BaseConverter):
    """ Extend werkzeug routing by supporting regex for urls/API endpoints """
    def __init__(self, url_map, *items):
        super(RegexConverter, self).__init__(url_map)
        # The first (and only expected) item is the regex pattern itself.
        self.regex = items[0]
class Eve(Flask, Events):
""" The main Eve object. On initialization it will load Eve settings, then
configure and enable the API endpoints. The API is launched by executing
the code below:::
app = Eve()
app.run()
:param import_name: the name of the application package
:param settings: the name of the settings file. Defaults to `settings.py`.
:param validator: custom validation class. Must be a
:class:`~cerberus.Validator` subclass. Defaults to
:class:`eve.io.mongo.Validator`.
:param data: the data layer class. Must be a :class:`~eve.io.DataLayer`
subclass. Defaults to :class:`~eve.io.Mongo`.
:param auth: the authentication class used to authenticate incoming
requests. Must be a :class: `eve.auth.BasicAuth` subclass.
:param redis: the redis (pyredis) instance used by the Rate-Limiting
feature, if enabled.
:param url_converters: dictionary of Flask url_converters to add to
supported ones (int, float, path, regex).
:param json_encoder: custom json encoder class. Must be a
JSONEncoder subclass. You probably wnat it to be
as eve.io.base.BaseJSONEncoder subclass.
:param kwargs: optional, standard, Flask parameters.
.. versionchanged:: 0.2
Support for additional Flask url converters.
Support for optional, custom json encoder class.
Support for endpoint-level authenticatoin classes.
New method Eve.register_resource() for registering new resource after
initialization of Eve object. This is needed for simpler initialization
API of all ORM/ODM extensions.
.. versionchanged:: 0.1.0
Now supporting both "trailing slashes" and "no-trailing slashes" URLs.
.. versionchanged:: 0.0.7
'redis' argument added to handle an accessory Redis server (currently
used by the Rate-Limiting feature).
.. versionchanged:: 0.0.6
'Events' added to the list of super classes, allowing for the arbitrary
raising of events within the application.
.. versionchanged:: 0.0.4
'auth' argument added to handle authentication classes
"""
#: Allowed methods for resource endpoints
supported_resource_methods = ['GET', 'POST', 'DELETE']
#: Allowed methods for item endpoints
supported_item_methods = ['GET', 'PATCH', 'DELETE', 'PUT']
    def __init__(self, import_name=__package__, settings='settings.py',
                 validator=Validator, data=Mongo, auth=None, redis=None,
                 url_converters=None, json_encoder=None, **kwargs):
        """ Eve main WSGI app is implemented as a Flask subclass. Since we want
        to be able to launch our API by simply invoking Flask's run() method,
        we need to enhance our super-class a little bit.
        The tasks we need to accomplish are:
            1. enable regex routing
            2. enable optional url_converters, if any
            3. enable optional json_encoder class, if any
            4. load and validate custom API settings
            5. enable API endpoints
            6. set the validator class used to validate incoming objects
            7. activate the chosen data layer
            8. instance the authentication layer if needed
            9. set the redis instance to be used by the Rate-Limiting feature
        .. versionchanged:: 0.2
           Support for additional, optional Flask url_converters.
           Support for optional, custom json encoder class.
           Support for endpoint-level authentication classes.
           Validate and set defaults for each resource
        """
        super(Eve, self).__init__(import_name, **kwargs)
        self.validator = validator
        self.settings = settings
        # config must be loaded and validated before the data layer is
        # instantiated, as data(self) reads the app configuration
        self.load_config()
        self.validate_domain_struct()
        self.data = data(self)
        # enable regex routing
        self.url_map.converters['regex'] = RegexConverter
        # optional url_converters and json encoder
        if url_converters:
            self.url_map.converters.update(url_converters)
        if json_encoder:
            # the encoder lives on the data layer, which handles serialization
            self.data.json_encoder_class = json_encoder
        # auth is a class: instantiate it; None disables authentication
        self.auth = auth() if auth else None
        self.redis = redis
        # validate and set defaults for each resource
        # NOTE: `settings` is deliberately rebound here as the loop variable
        for resource, settings in self.config['DOMAIN'].items():
            self._set_resource_defaults(resource, settings)
            self._validate_resource_settings(resource, settings)
        self._add_url_rules()
def run(self, host=None, port=None, debug=None, **options):
"""
Pass our own subclass of :class:`werkzeug.serving.WSGIRequestHandler
to Flask.
:param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
have the server available externally as well. Defaults to
``'127.0.0.1'``.
:param port: the port of the webserver. Defaults to ``5000``.
:param debug: if given, enable or disable debug mode.
See :attr:`debug`.
:param options: the options to be forwarded to the underlying
Werkzeug server. See
:func:`werkzeug.serving.run_simple` for more
information. """
options.setdefault('request_handler', EveWSGIRequestHandler)
super(Eve, self).run(host, port, debug, **options)
def load_config(self):
""" API settings are loaded from standard python modules. First from
`settings.py`(or alternative name/path passed as an argument) and
then, when defined, from the file specified in the
`EVE_SETTINGS` environment variable.
Since we are a Flask subclass, any configuration value supported by
Flask itself is available (besides Eve's proper settings).
.. versionchanged:: 0.2
Allow use of a dict object as settings.
"""
# load defaults
self.config.from_object('eve.default_settings')
# overwrite the defaults with custom user settings
if isinstance(self.settings, dict):
self.config.update(self.settings)
else:
if os.path.isabs(self.settings):
pyfile = self.settings
else:
abspath = os.path.abspath(os.path.dirname(sys.argv[0]))
pyfile = os.path.join(abspath, self.settings)
self.config.from_pyfile(pyfile)
#overwrite settings with custom environment variable
envvar = 'EVE_SETTINGS'
if os.environ.get(envvar):
self.config.from_envvar(envvar)
def validate_domain_struct(self):
""" Validates that Eve configuration settings conform to the
requirements.
"""
try:
domain = self.config['DOMAIN']
except:
raise ConfigException('DOMAIN dictionary missing or wrong.')
if not isinstance(domain, dict):
raise ConfigException('DOMAIN must be a dict.')
if len(domain) == 0:
raise ConfigException('DOMAIN must contain at least one resource.')
def validate_config(self):
""" Makes sure that REST methods expressed in the configuration
settings are supported.
.. versionchanged:: 0.2.0
Default supported methods are now class-level attributes.
Resource validation delegated to _validate_resource_settings().
.. versionchanged:: 0.1.0
Support for PUT method.
.. versionchanged:: 0.0.4
Support for 'allowed_roles' and 'allowed_item_roles'
.. versionchanged:: 0.0.2
Support for DELETE resource method.
"""
# make sure that global resource methods are supported.
self.validate_methods(self.supported_resource_methods,
self.config.get('RESOURCE_METHODS'),
'resource')
# make sure that global item methods are supported.
self.validate_methods(self.supported_item_methods,
self.config.get('ITEM_METHODS'),
'item')
# make sure that individual resource/item methods are supported.
for resource, settings in self.config['DOMAIN'].items():
self._validate_resource_settings(resource, settings)
def _validate_resource_settings(self, resource, settings):
""" Validates one resource in configuration settings.
:param resource: name of the resource which settings refer to.
:param settings: settings of resource to be validated.
.. versionadded:: 0.2
"""
self.validate_methods(self.supported_resource_methods,
settings['resource_methods'],
'[%s] resource ' % resource)
self.validate_methods(self.supported_item_methods,
settings['item_methods'],
'[%s] item ' % resource)
# while a resource schema is optional for read-only access,
# it is mandatory for write-access to resource/items.
if 'POST' in settings['resource_methods'] or \
'PATCH' in settings['item_methods']:
if len(settings['schema']) == 0:
raise ConfigException('A resource schema must be provided '
'when POST or PATCH methods are allowed'
'for a resource [%s].' % resource)
self.validate_roles('allowed_roles', settings, resource)
self.validate_roles('allowed_item_roles', settings, resource)
self.validate_schema(resource, settings['schema'])
def validate_roles(self, directive, candidate, resource):
""" Validates that user role directives are syntactically and formally
adeguate.
:param directive: either 'allowed_roles' or 'allow_item_roles'.
:param candidate: the candidate setting to be validated.
:param resource: name of the resource to which the candidate settings
refer to.
.. versionadded:: 0.0.4
"""
roles = candidate[directive]
if roles is not None and (not isinstance(roles, list) or not
len(roles)):
raise ConfigException("'%s' must be a non-empty list, or None "
"[%s]." % (directive, resource))
def validate_methods(self, allowed, proposed, item):
""" Compares allowed and proposed methods, raising a `ConfigException`
when they don't match.
:param allowed: a list of supported (allowed) methods.
:param proposed: a list of proposed methods.
:param item: name of the item to which the methods would be applied.
Used when raising the exception.
"""
diff = set(proposed) - set(allowed)
if diff:
raise ConfigException('Unallowed %s method(s): %s. '
'Supported: %s' %
(item, ', '.join(diff),
', '.join(allowed)))
def validate_schema(self, resource, schema):
""" Validates a resource schema.
:param resource: resource name.
:param schema: schema definition for the resource.
.. versionchanged:: 0.2
Allow ID_FIELD in resource schema if not of 'objectid' type.
.. versionchanged:: 0.1.1
'collection' setting renamed to 'resource' (data_relation).
Fix order of string arguments in exception message.
.. versionchanged:: 0.1.0
Validation for 'embeddable' fields.
.. versionchanged:: 0.0.5
Validation of the 'data_relation' field rule.
Now collecting offending items in a list and inserting results into
the exception message.
"""
# TODO are there other mandatory settings? Validate them here
| |
"metadataonly",
("prop", "1943:309"): "metadataonly",
("prop", "1943:31"): "metadataonly",
("prop", "1943:310"): "metadataonly",
("prop", "1943:311"): "metadataonly",
("prop", "1943:312"): "metadataonly",
("prop", "1943:313"): "metadataonly",
("prop", "1943:314"): "metadataonly",
("prop", "1943:315"): "metadataonly",
("prop", "1943:316"): "metadataonly",
("prop", "1943:317"): "metadataonly",
("prop", "1943:318"): "metadataonly",
("prop", "1943:319"): "metadataonly",
("prop", "1943:32"): "metadataonly",
("prop", "1943:321"): "metadataonly",
("prop", "1943:322"): "metadataonly",
("prop", "1943:323"): "metadataonly",
("prop", "1943:324"): "metadataonly",
("prop", "1943:325"): "metadataonly",
("prop", "1943:326"): "metadataonly",
("prop", "1943:327"): "metadataonly",
("prop", "1943:33"): "metadataonly",
("prop", "1943:34"): "metadataonly",
("prop", "1943:35"): "metadataonly",
("prop", "1943:36"): "metadataonly",
("prop", "1943:37"): "metadataonly",
("prop", "1943:38"): "metadataonly",
("prop", "1943:39"): "metadataonly",
("prop", "1943:40"): "metadataonly",
("prop", "1943:41"): "metadataonly",
("prop", "1943:42"): "metadataonly",
("prop", "1943:43"): "metadataonly",
("prop", "1943:44"): "metadataonly",
("prop", "1943:45"): "metadataonly",
("prop", "1943:46"): "metadataonly",
("prop", "1943:47"): "metadataonly",
("prop", "1943:48"): "metadataonly",
("prop", "1943:49"): "metadataonly",
("prop", "1943:5"): "metadataonly",
("prop", "1943:50"): "metadataonly",
("prop", "1943:51"): "metadataonly",
("prop", "1943:52"): "metadataonly",
("prop", "1943:53"): "metadataonly",
("prop", "1943:54"): "metadataonly",
("prop", "1943:55"): "metadataonly",
("prop", "1943:56"): "metadataonly",
("prop", "1943:57"): "metadataonly",
("prop", "1943:58"): "metadataonly",
("prop", "1943:59"): "metadataonly",
("prop", "1943:6"): "metadataonly",
("prop", "1943:60"): "metadataonly",
("prop", "1943:61"): "metadataonly",
("prop", "1943:62"): "metadataonly",
("prop", "1943:63"): "metadataonly",
("prop", "1943:64"): "metadataonly",
("prop", "1943:65"): "metadataonly",
("prop", "1943:66"): "metadataonly",
("prop", "1943:67"): "metadataonly",
("prop", "1943:68"): "metadataonly",
("prop", "1943:69"): "metadataonly",
("prop", "1943:7"): "metadataonly",
("prop", "1943:70"): "metadataonly",
("prop", "1943:71"): "metadataonly",
("prop", "1943:72"): "metadataonly",
("prop", "1943:73"): "metadataonly",
("prop", "1943:74"): "metadataonly",
("prop", "1943:75"): "metadataonly",
("prop", "1943:76"): "metadataonly",
("prop", "1943:77"): "metadataonly",
("prop", "1943:78"): "metadataonly",
("prop", "1943:79"): "metadataonly",
("prop", "1943:8"): "metadataonly",
("prop", "1943:80"): "metadataonly",
("prop", "1943:81"): "metadataonly",
("prop", "1943:82"): "metadataonly",
("prop", "1943:83"): "metadataonly",
("prop", "1943:84"): "metadataonly",
("prop", "1943:85"): "metadataonly",
("prop", "1943:86"): "metadataonly",
("prop", "1943:87"): "metadataonly",
("prop", "1943:88"): "metadataonly",
("prop", "1943:89"): "metadataonly",
("prop", "1943:9"): "metadataonly",
("prop", "1943:90"): "metadataonly",
("prop", "1943:92"): "metadataonly",
("prop", "1943:93"): "metadataonly",
("prop", "1943:94"): "metadataonly",
("prop", "1943:95"): "metadataonly",
("prop", "1943:96"): "metadataonly",
("prop", "1943:97"): "metadataonly",
("prop", "1943:98"): "metadataonly",
("prop", "1943:99"): "metadataonly",
("prop", "1944:1"): "metadataonly",
("prop", "1944:100"): "metadataonly",
("prop", "1944:101"): "metadataonly",
("prop", "1944:102"): "metadataonly",
("prop", "1944:103"): "metadataonly",
("prop", "1944:104"): "metadataonly",
("prop", "1944:105"): "metadataonly",
("prop", "1944:106"): "metadataonly",
("prop", "1944:107"): "metadataonly",
("prop", "1944:108"): "metadataonly",
("prop", "1944:109"): "metadataonly",
("prop", "1944:110"): "metadataonly",
("prop", "1944:111"): "metadataonly",
("prop", "1944:112"): "metadataonly",
("prop", "1944:113"): "metadataonly",
("prop", "1944:114"): "metadataonly",
("prop", "1944:115"): "metadataonly",
("prop", "1944:116"): "metadataonly",
("prop", "1944:117"): "metadataonly",
("prop", "1944:118"): "metadataonly",
("prop", "1944:119"): "metadataonly",
("prop", "1944:120"): "metadataonly",
("prop", "1944:121"): "metadataonly",
("prop", "1944:122"): "metadataonly",
("prop", "1944:123"): "metadataonly",
("prop", "1944:124"): "metadataonly",
("prop", "1944:125"): "metadataonly",
("prop", "1944:126"): "metadataonly",
("prop", "1944:127"): "metadataonly",
("prop", "1944:128"): "metadataonly",
("prop", "1944:129"): "metadataonly",
("prop", "1944:130"): "metadataonly",
("prop", "1944:131"): "metadataonly",
("prop", "1944:132"): "metadataonly",
("prop", "1944:133"): "metadataonly",
("prop", "1944:134"): "metadataonly",
("prop", "1944:135"): "metadataonly",
("prop", "1944:136"): "metadataonly",
("prop", "1944:137"): "metadataonly",
("prop", "1944:138"): "metadataonly",
("prop", "1944:139"): "metadataonly",
("prop", "1944:140"): "metadataonly",
("prop", "1944:141"): "metadataonly",
("prop", "1944:142"): "metadataonly",
("prop", "1944:143"): "metadataonly",
("prop", "1944:144"): "metadataonly",
("prop", "1944:145"): "metadataonly",
("prop", "1944:146"): "metadataonly",
("prop", "1944:147"): "metadataonly",
("prop", "1944:148"): "metadataonly",
("prop", "1944:149"): "metadataonly",
("prop", "1944:15"): "metadataonly",
("prop", "1944:150"): "metadataonly",
("prop", "1944:151"): "metadataonly",
("prop", "1944:152"): "metadataonly",
("prop", "1944:153"): "metadataonly",
("prop", "1944:154"): "metadataonly",
("prop", "1944:155"): "metadataonly",
("prop", "1944:156"): "metadataonly",
("prop", "1944:157"): "metadataonly",
("prop", "1944:158"): "metadataonly",
("prop", "1944:159"): "metadataonly",
("prop", "1944:16"): "metadataonly",
("prop", "1944:160"): "metadataonly",
("prop", "1944:161"): "metadataonly",
("prop", "1944:162"): "metadataonly",
("prop", "1944:163"): "metadataonly",
("prop", "1944:164"): "metadataonly",
("prop", "1944:165"): "metadataonly",
("prop", "1944:166"): "metadataonly",
("prop", "1944:167"): "metadataonly",
("prop", "1944:168"): "metadataonly",
("prop", "1944:17"): "metadataonly",
("prop", "1944:170"): "metadataonly",
("prop", "1944:171"): "metadataonly",
("prop", "1944:172"): "metadataonly",
("prop", "1944:173"): "metadataonly",
("prop", "1944:174"): "metadataonly",
("prop", "1944:175"): "metadataonly",
("prop", "1944:177"): "metadataonly",
("prop", "1944:178"): "metadataonly",
("prop", "1944:179"): "metadataonly",
("prop", "1944:18"): "metadataonly",
("prop", "1944:180"): "metadataonly",
("prop", "1944:181"): "metadataonly",
("prop", "1944:182"): "metadataonly",
("prop", "1944:183"): "metadataonly",
("prop", "1944:184"): "metadataonly",
("prop", "1944:185"): "metadataonly",
("prop", "1944:186"): "metadataonly",
("prop", "1944:187"): "metadataonly",
("prop", "1944:189"): "metadataonly",
("prop", "1944:19"): "metadataonly",
("prop", "1944:190"): "metadataonly",
("prop", "1944:191"): "metadataonly",
("prop", "1944:192"): "metadataonly",
("prop", "1944:194"): "metadataonly",
("prop", "1944:195"): "metadataonly",
("prop", "1944:196"): "metadataonly",
("prop", "1944:197"): "metadataonly",
("prop", "1944:198"): "metadataonly",
("prop", "1944:199"): "metadataonly",
("prop", "1944:2"): "metadataonly",
("prop", "1944:200"): "metadataonly",
("prop", "1944:202"): "metadataonly",
("prop", "1944:203"): "metadataonly",
("prop", "1944:204"): "metadataonly",
("prop", "1944:205"): "metadataonly",
("prop", "1944:206"): "metadataonly",
("prop", "1944:207"): "metadataonly",
("prop", "1944:208"): "metadataonly",
("prop", "1944:209"): "metadataonly",
("prop", "1944:21"): "metadataonly",
("prop", "1944:210"): "metadataonly",
("prop", "1944:211"): "metadataonly",
("prop", "1944:212"): "metadataonly",
("prop", "1944:213"): "metadataonly",
("prop", "1944:214"): "metadataonly",
("prop", "1944:215"): "metadataonly",
("prop", "1944:216"): "metadataonly",
("prop", "1944:217"): "metadataonly",
("prop", "1944:218"): "metadataonly",
("prop", "1944:219"): "metadataonly",
("prop", "1944:22"): "metadataonly",
("prop", "1944:220"): "metadataonly",
("prop", "1944:221"): "metadataonly",
("prop", "1944:222"): "metadataonly",
("prop", "1944:223"): "metadataonly",
("prop", "1944:224"): "metadataonly",
("prop", "1944:225"): "metadataonly",
("prop", "1944:226"): "metadataonly",
("prop", "1944:227"): "metadataonly",
("prop", "1944:228"): "metadataonly",
("prop", "1944:229"): "metadataonly",
("prop", "1944:23"): "metadataonly",
("prop", "1944:230"): "metadataonly",
("prop", "1944:231"): "metadataonly",
("prop", "1944:232"): "metadataonly",
("prop", "1944:233"): "metadataonly",
("prop", "1944:234"): "metadataonly",
("prop", "1944:235"): "metadataonly",
("prop", "1944:236"): "metadataonly",
("prop", "1944:237"): "metadataonly",
("prop", "1944:238"): "metadataonly",
("prop", "1944:239"): "metadataonly",
("prop", "1944:24"): "metadataonly",
("prop", "1944:240"): "metadataonly",
("prop", "1944:241"): "metadataonly",
("prop", "1944:242"): "metadataonly",
("prop", "1944:243"): "metadataonly",
("prop", "1944:244"): "metadataonly",
("prop", "1944:246"): "metadataonly",
("prop", "1944:247"): "metadataonly",
("prop", "1944:248"): "metadataonly",
("prop", "1944:249"): "metadataonly",
("prop", "1944:25"): "metadataonly",
("prop", "1944:250"): "metadataonly",
("prop", "1944:251"): "metadataonly",
("prop", "1944:253"): "metadataonly",
("prop", "1944:254"): "metadataonly",
("prop", "1944:255"): "metadataonly",
("prop", "1944:256"): "metadataonly",
("prop", "1944:257"): "metadataonly",
("prop", "1944:258"): "metadataonly",
("prop", "1944:259"): "metadataonly",
("prop", "1944:26"): "metadataonly",
("prop", "1944:261"): "metadataonly",
("prop", "1944:262"): "metadataonly",
("prop", "1944:263"): "metadataonly",
("prop", "1944:264"): "metadataonly",
("prop", "1944:267"): "metadataonly",
("prop", "1944:27"): "metadataonly",
("prop", "1944:270"): "metadataonly",
("prop", "1944:271"): "metadataonly",
("prop", "1944:272"): "metadataonly",
("prop", "1944:273"): "metadataonly",
("prop", "1944:274"): "metadataonly",
("prop", "1944:275"): "metadataonly",
("prop", "1944:276"): "metadataonly",
("prop", "1944:277"): "metadataonly",
("prop", "1944:278"): "metadataonly",
("prop", "1944:279"): "metadataonly",
("prop", "1944:28"): "metadataonly",
("prop", "1944:282"): "metadataonly",
("prop", "1944:283"): "metadataonly",
("prop", "1944:284"): "metadataonly",
("prop", "1944:285"): "metadataonly",
("prop", "1944:286"): "metadataonly",
("prop", "1944:29"): "metadataonly",
("prop", "1944:3"): "metadataonly",
("prop", "1944:30"): "metadataonly",
("prop", "1944:31"): "metadataonly",
("prop", "1944:32"): "metadataonly",
("prop", "1944:33"): "metadataonly",
("prop", "1944:34"): "metadataonly",
("prop", "1944:35"): "metadataonly",
("prop", "1944:36"): "metadataonly",
("prop", "1944:37"): "metadataonly",
("prop", "1944:38"): "metadataonly",
("prop", "1944:39"): "metadataonly",
("prop", "1944:4"): "metadataonly",
("prop", "1944:40"): "metadataonly",
("prop", "1944:41"): "metadataonly",
("prop", "1944:42"): "metadataonly",
("prop", "1944:44"): "metadataonly",
("prop", "1944:45"): "metadataonly",
("prop", "1944:46"): "metadataonly",
("prop", "1944:47"): "metadataonly",
("prop", "1944:48"): "metadataonly",
("prop", "1944:49"): "metadataonly",
("prop", "1944:50"): "metadataonly",
("prop", "1944:51"): "metadataonly",
("prop", "1944:52"): "metadataonly",
("prop", "1944:53"): "metadataonly",
("prop", "1944:54"): "metadataonly",
("prop", "1944:55"): "metadataonly",
("prop", "1944:56"): "metadataonly",
("prop", "1944:57"): "metadataonly",
("prop", "1944:58"): "metadataonly",
("prop", "1944:59"): "metadataonly",
("prop", "1944:6"): "metadataonly",
("prop", "1944:60"): "metadataonly",
("prop", "1944:61"): "metadataonly",
("prop", "1944:62"): "metadataonly",
("prop", "1944:64"): "metadataonly",
("prop", "1944:65"): "metadataonly",
("prop", "1944:66"): "metadataonly",
("prop", "1944:67"): "metadataonly",
("prop", "1944:68"): "metadataonly",
("prop", "1944:69"): "metadataonly",
("prop", "1944:7"): "metadataonly",
("prop", "1944:70"): "metadataonly",
("prop", "1944:71"): "metadataonly",
("prop", "1944:72"): "metadataonly",
("prop", "1944:73"): "metadataonly",
("prop", "1944:74"): "metadataonly",
("prop", "1944:75"): "metadataonly",
("prop", "1944:76"): "metadataonly",
("prop", "1944:77"): "metadataonly",
("prop", "1944:78"): "metadataonly",
("prop", "1944:79"): "metadataonly",
("prop", "1944:80"): "metadataonly",
("prop", "1944:82"): "metadataonly",
("prop", "1944:83"): "metadataonly",
("prop", "1944:84"): "metadataonly",
("prop", "1944:85"): "metadataonly",
("prop", "1944:86"): "metadataonly",
("prop", "1944:87"): "metadataonly",
("prop", "1944:88"): "metadataonly",
("prop", "1944:89"): "metadataonly",
("prop", "1944:9"): "metadataonly",
("prop", "1944:90"): "metadataonly",
("prop", "1944:91"): "metadataonly",
("prop", "1944:92"): "metadataonly",
("prop", "1944:93"): "metadataonly",
("prop", "1944:94"): "metadataonly",
("prop", "1944:95"): "metadataonly",
("prop", "1944:96"): "metadataonly",
("prop", "1944:97"): "metadataonly",
("prop", "1944:98"): "metadataonly",
("prop", "1944:99"): "metadataonly",
("prop", "1945:100"): "metadataonly",
("prop", "1945:101"): "metadataonly",
("prop", "1945:102"): "metadataonly",
("prop", "1945:104"): "metadataonly",
("prop", "1945:105"): "metadataonly",
("prop", "1945:106"): "metadataonly",
("prop", "1945:107"): "metadataonly",
("prop", "1945:108"): "metadataonly",
("prop", "1945:109"): "metadataonly",
("prop", "1945:11"): "metadataonly",
("prop", "1945:110"): "metadataonly",
("prop", "1945:111"): "metadataonly",
("prop", "1945:112"): "metadataonly",
("prop", "1945:113"): "metadataonly",
("prop", "1945:114"): "metadataonly",
("prop", "1945:115"): "metadataonly",
("prop", "1945:116"): "metadataonly",
("prop", "1945:117"): "metadataonly",
("prop", "1945:118"): "metadataonly",
("prop", "1945:119"): "metadataonly",
("prop", "1945:12"): "metadataonly",
("prop", "1945:120"): "metadataonly",
("prop", "1945:121"): "metadataonly",
("prop", "1945:122"): "metadataonly",
("prop", "1945:123"): "metadataonly",
("prop", "1945:124"): "metadataonly",
("prop", "1945:125"): "metadataonly",
("prop", "1945:126"): "metadataonly",
("prop", "1945:127"): "metadataonly",
("prop", "1945:128"): "metadataonly",
("prop", "1945:129"): "metadataonly",
("prop", "1945:13"): "metadataonly",
("prop", "1945:130"): "metadataonly",
("prop", "1945:131"): "metadataonly",
("prop", "1945:132"): "metadataonly",
("prop", "1945:133"): "metadataonly",
("prop", "1945:134"): "metadataonly",
("prop", "1945:135"): "metadataonly",
("prop", "1945:136"): "metadataonly",
("prop", "1945:137"): "metadataonly",
("prop", "1945:138"): "metadataonly",
("prop", "1945:14"): "metadataonly",
("prop", "1945:140"): "metadataonly",
("prop", "1945:141"): "metadataonly",
("prop", "1945:142"): "metadataonly",
("prop", "1945:143"): "metadataonly",
("prop", "1945:144"): "metadataonly",
("prop", "1945:145"): "metadataonly",
("prop", "1945:146"): "metadataonly",
("prop", "1945:147"): "metadataonly",
("prop", "1945:148"): "metadataonly",
("prop", "1945:149"): "metadataonly",
("prop", "1945:15"): "metadataonly",
("prop", "1945:150"): "metadataonly",
("prop", "1945:151"): "metadataonly",
("prop", "1945:152"): "metadataonly",
("prop", "1945:153"): "metadataonly",
("prop", "1945:154"): "metadataonly",
("prop", "1945:155"): "metadataonly",
("prop", "1945:156"): "metadataonly",
("prop", "1945:157"): "metadataonly",
("prop", "1945:158"): "metadataonly",
("prop", "1945:159"): "metadataonly",
("prop", "1945:16"): "metadataonly",
("prop", "1945:160"): "metadataonly",
("prop", "1945:161"): "metadataonly",
("prop", "1945:162"): "metadataonly",
("prop", "1945:163"): "metadataonly",
("prop", "1945:164"): "metadataonly",
("prop", "1945:165"): "metadataonly",
("prop", "1945:166"): "metadataonly",
("prop", "1945:167"): "metadataonly",
("prop", "1945:168"): "metadataonly",
("prop", "1945:169"): "metadataonly",
("prop", | |
as uuid on user.id = uuid.user_id where uuid.uuid = '%s' """ % uuid
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
for user_id in cur.fetchall():
return user_id[0]
cur.close()
con.close()
def get_user_telegram_by_uuid(uuid):
    """Return telegram settings rows for the user identified by session *uuid*.

    Returns cur.fetchall() on success, or None when the query fails (the
    error is reported via funct.out_error).
    """
    con, cur = get_cur()
    # NOTE(review): string-formatted SQL is injectable — *uuid* comes from a
    # cookie; switch to a parameterized query once the paramstyle is known.
    sql = """ select telegram.* from telegram left join user as user on telegram.groups = user.groups left join uuid as uuid on user.id = uuid.user_id where uuid.uuid = '%s' """ % uuid
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    finally:
        # fix: close() calls were unreachable after `return` (connection leak)
        cur.close()
        con.close()
def get_telegram_by_ip(ip):
    """Return telegram settings rows for the group owning server *ip*.

    Returns cur.fetchall() on success, or None when the query fails.
    """
    con, cur = get_cur()
    # NOTE(review): string-formatted SQL is injectable — parameterize.
    sql = """ select telegram.* from telegram left join servers as serv on serv.groups = telegram.groups where serv.ip = '%s' """ % ip
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    finally:
        # fix: close() calls were unreachable after `return` (connection leak)
        cur.close()
        con.close()
def get_dick_permit(**kwargs):
    """Return the `servers` rows the current (or given) user may access.

    Keyword filters: username, virt, disable, ip, haproxy, nginx, keepalived.
    Returns cur.fetchall() of matching rows, or None on error / no groups.
    """
    import http.cookies
    import os
    cookie = http.cookies.SimpleCookie(os.environ.get("HTTP_COOKIE"))
    user_id = cookie.get('uuid')
    disable = ''
    haproxy = ''
    nginx = ''
    keepalived = ''
    ip = ''
    con, cur = get_cur()
    # NOTE(review): every clause below is built with string formatting and is
    # injectable; the DB layer's paramstyle should be used instead.
    if kwargs.get('username'):
        sql = """ select * from user where username = '%s' """ % kwargs.get('username')
    else:
        sql = """ select * from user where username = '%s' """ % get_user_name_by_uuid(user_id.value)
    if kwargs.get('virt'):
        type_ip = ""
    else:
        type_ip = "and type_ip = 0"
    if kwargs.get('disable') == 0:
        disable = 'or enable = 0'
    if kwargs.get('ip'):
        ip = "and ip = '%s'" % kwargs.get('ip')
    if kwargs.get('haproxy'):
        haproxy = "and haproxy = 1"
    if kwargs.get('nginx'):
        nginx = "and nginx = 1"
    if kwargs.get('keepalived'):
        # BUG FIX: this clause was assigned to `nginx`, so the keepalived
        # filter was never applied and clobbered any nginx filter.
        keepalived = "and keepalived = 1"
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        print("An error occurred:", e)
    else:
        for group in cur:
            # group[5] == '1' presumably marks the admin group (sees all
            # enabled servers) — TODO confirm against the user table schema
            if group[5] == '1':
                sql = """ select * from servers where enable = 1 %s %s %s """ % (disable, type_ip, nginx)
            else:
                sql = """ select * from servers where groups like '%{group}%' and (enable = 1 {disable}) {type_ip} {ip} {haproxy} {nginx} {keepalived}
                """.format(group=group[5], disable=disable, type_ip=type_ip, ip=ip, haproxy=haproxy, nginx=nginx, keepalived=keepalived)
            try:
                cur.execute(sql)
            except sqltool.Error as e:
                funct.out_error(e)
            else:
                return cur.fetchall()
    finally:
        # fix: close() calls were unreachable after `return` (connection leak)
        cur.close()
        con.close()
def is_master(ip, **kwargs):
    """Return slave rows for master server *ip*, or — with master_slave=True —
    all (master, slave) hostname/ip pairs.

    Returns cur.fetchall() on success, or None when the query fails.
    """
    con, cur = get_cur()
    # NOTE(review): string-formatted SQL is injectable — parameterize.
    sql = """ select slave.ip, slave.hostname from servers as master left join servers as slave on master.id = slave.master where master.ip = '%s' """ % ip
    if kwargs.get('master_slave'):
        sql = """ select master.hostname, master.ip, slave.hostname, slave.ip from servers as master left join servers as slave on master.id = slave.master where slave.master > 0 """
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    finally:
        # fix: close() calls were unreachable after `return` (connection leak)
        cur.close()
        con.close()
def select_ssh(**kwargs):
    """Return SSH credential rows, optionally filtered by name, id or serv
    (server ip). Later keyword filters take precedence over earlier ones.

    Returns cur.fetchall() on success, or None when the query fails.
    """
    con, cur = get_cur()
    # NOTE(review): string-formatted SQL is injectable — parameterize.
    sql = """select * from cred """
    if kwargs.get("name") is not None:
        sql = """select * from cred where name = '%s' """ % kwargs.get("name")
    if kwargs.get("id") is not None:
        sql = """select * from cred where id = '%s' """ % kwargs.get("id")
    if kwargs.get("serv") is not None:
        sql = """select serv.cred, cred.* from servers as serv left join cred on cred.id = serv.cred where serv.ip = '%s' """ % kwargs.get("serv")
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    finally:
        # fix: close() calls were unreachable after `return` (connection leak)
        cur.close()
        con.close()
def insert_new_ssh(name, enable, group, username, password):
    """Insert a new SSH credential row.

    Returns True on success, None on failure (error reported, tx rolled back).
    """
    con, cur = get_cur()
    # NOTE(review): string-formatted SQL is injectable — parameterize.
    sql = """insert into cred(name, enable, groups, username, password) values ('%s', '%s', '%s', '%s', '%s') """ % (name, enable, group, username, password)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    else:
        return True
    finally:
        # fix: close() calls were unreachable on the success path (leak)
        cur.close()
        con.close()
def delete_ssh(id):
    """Delete the SSH credential row with the given *id*.

    Returns True on success, None on failure (error reported, tx rolled back).
    """
    con, cur = get_cur()
    # NOTE(review): string-formatted SQL is injectable — parameterize.
    sql = """ delete from cred where id = %s """ % (id)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    else:
        return True
    finally:
        # fix: close() calls were unreachable on the success path (leak)
        cur.close()
        con.close()
def update_ssh(id, name, enable, group, username, password):
    """Update the `cred` row *id* with the given SSH credential fields.

    Returns None; errors are reported via funct.out_error and rolled back.
    """
    con, cur = get_cur()
    # BUG FIX: the password assignment was corrupted ("password = <PASSWORD>'"),
    # producing invalid SQL and a mismatched format tuple; `groups` was also
    # unquoted, unlike every other statement in this module.
    # NOTE(review): string-formatted SQL is injectable — parameterize.
    sql = """ update cred set
            name = '%s',
            enable = '%s',
            groups = '%s',
            username = '%s',
            password = '%s' where id = '%s' """ % (name, enable, group, username, password, id)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    finally:
        cur.close()
        con.close()
def insert_backup_job(server, rserver, rpath, type, time, cred, description):
    """Insert a new backup job row.

    Returns True on success, False on failure (error reported, tx rolled back).
    """
    con, cur = get_cur()
    # NOTE(review): string-formatted SQL is injectable — parameterize.
    sql = """insert into backups(server, rhost, rpath, type, time, cred, description) values ('%s', '%s', '%s', '%s', '%s', '%s', '%s') """ % (server, rserver, rpath, type, time, cred, description)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
        return False
    else:
        return True
    finally:
        # fix: close() calls were unreachable on every path (connection leak)
        cur.close()
        con.close()
def select_backups(**kwargs):
    """Return backup job rows, optionally filtered by server + rserver.

    Returns cur.fetchall() on success, or None when the query fails.
    """
    con, cur = get_cur()
    # NOTE(review): string-formatted SQL is injectable — parameterize.
    sql = """select * from backups ORDER BY id"""
    if kwargs.get("server") is not None and kwargs.get("rserver") is not None:
        sql = """select * from backups where server='%s' and rhost = '%s' """ % (kwargs.get("server"), kwargs.get("rserver"))
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    finally:
        # fix: close() calls were unreachable after `return` (connection leak)
        cur.close()
        con.close()
def update_backup(server, rserver, rpath, type, time, cred, description, id):
    """Update the backup job row *id* with the given fields.

    Returns True on success, False on failure (error reported, tx rolled back).
    """
    con, cur = get_cur()
    # NOTE(review): string-formatted SQL is injectable — parameterize.
    sql = """update backups set server = '%s',
            rhost = '%s',
            rpath = '%s',
            type = '%s',
            time = '%s',
            cred = '%s',
            description = '%s' where id = '%s' """ % (server, rserver, rpath, type, time, cred, description, id)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
        return False
    else:
        return True
    finally:
        # fix: close() calls were unreachable on every path (connection leak)
        cur.close()
        con.close()
def delete_backups(id):
    """Delete the backup job row with the given *id*.

    Returns True on success, None on failure (error reported, tx rolled back).
    """
    con, cur = get_cur()
    # NOTE(review): string-formatted SQL is injectable — parameterize.
    sql = """ delete from backups where id = %s """ % (id)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    else:
        return True
    finally:
        # fix: close() calls were unreachable on the success path (leak)
        cur.close()
        con.close()
def check_exists_backup(server):
    """Check whether a backup job exists for *server*.

    Returns True when the first matching row has a non-NULL id, False when it
    is NULL, and None when there are no rows or the query fails (original
    behavior preserved).
    """
    con, cur = get_cur()
    # NOTE(review): string-formatted SQL is injectable — parameterize.
    sql = """ select id from backups where server = '%s' """ % server
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        # only the first fetched row is ever examined
        for s in cur.fetchall():
            if s[0] is not None:
                return True
            else:
                return False
    finally:
        # fix: close() calls were unreachable after `return` (connection leak)
        cur.close()
        con.close()
def insert_new_telegram(token, chanel, group):
    """Insert a telegram receiver; return True on success, None on failure.

    NOTE(review): this function is defined twice in this file; the later
    definition wins — deduplicate.
    """
    con, cur = get_cur()
    sql = """insert into telegram(`token`, `chanel_name`, `groups`) values ('%s', '%s', '%s') """ % (token, chanel, group)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        # Consistency fix: report via funct.out_error like every other helper,
        # instead of print()-ing raw HTML here
        funct.out_error(e)
        con.rollback()
    else:
        return True
    finally:
        # Bug fix: close() calls were unreachable after the return above
        cur.close()
        con.close()
def delete_telegram(id):
    """Delete telegram receiver row *id*; return True on success, None on error."""
    con, cur = get_cur()
    sql = """ delete from telegram where id = %s """ % (id)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    else:
        return True
    finally:
        # Bug fix: close() calls were unreachable after the return above
        cur.close()
        con.close()
def select_telegram(**kwargs):
    """Return telegram rows, optionally filtered by group, token or id; None on error.

    Later filters override earlier ones when several kwargs are passed.
    """
    con, cur = get_cur()
    sql = """select * from telegram """
    if kwargs.get('group'):
        sql = """select * from telegram where groups = '%s' """ % kwargs.get('group')
    if kwargs.get('token'):
        sql = """select * from telegram where token = '%s' """ % kwargs.get('token')
    if kwargs.get('id'):
        sql = """select * from telegram where id = '%s' """ % kwargs.get('id')
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    finally:
        # Bug fix: close() calls were unreachable after the return above
        cur.close()
        con.close()
def insert_new_telegram(token, chanel, group):
    """Insert a telegram receiver; return True on success, None on failure.

    NOTE(review): this function is defined twice in this file; this later
    definition shadows the earlier one — deduplicate.
    """
    con, cur = get_cur()
    sql = """insert into telegram(`token`, `chanel_name`, `groups`) values ('%s', '%s', '%s') """ % (token, chanel, group)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        # Consistency fix: report via funct.out_error like every other helper,
        # instead of print()-ing raw HTML here
        funct.out_error(e)
        con.rollback()
    else:
        return True
    finally:
        # Bug fix: close() calls were unreachable after the return above
        cur.close()
        con.close()
def update_telegram(token, chanel, group, id):
    """Rewrite the token, channel name and group of telegram row *id*."""
    con, cur = get_cur()
    sql = """ update telegram set
    `token` = '%s',
    `chanel_name` = '%s',
    `groups` = '%s'
    where id = '%s' """ % (token, chanel, group, id)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    finally:
        # Robustness: guarantee the connection is released even if the
        # error handler itself raises
        cur.close()
        con.close()
def insert_new_option(option, group):
    """Insert a saved option for *group*; return True on success, None on failure."""
    con, cur = get_cur()
    sql = """insert into options(`options`, `groups`) values ('%s', '%s') """ % (option, group)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    else:
        return True
    finally:
        # Bug fix: close() calls were unreachable after the return above
        cur.close()
        con.close()
def select_options(**kwargs):
    """Return option rows; filter by exact option, or by group + term prefix; None on error."""
    con, cur = get_cur()
    sql = """select * from options """
    if kwargs.get('option'):
        sql = """select * from options where options = '%s' """ % kwargs.get('option')
    if kwargs.get('group'):
        # NOTE(review): relies on kwargs['term']; if callers omit it the
        # pattern becomes 'None%' — confirm all callers pass term
        sql = """select options from options where groups = '{}' and options like '{}%' """.format(kwargs.get('group'), kwargs.get('term'))
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    finally:
        # Bug fix: close() calls were unreachable after the return above
        cur.close()
        con.close()
def update_options(option, id):
    """Rewrite the text of saved option row *id*."""
    con, cur = get_cur()
    sql = """ update options set
    options = '%s'
    where id = '%s' """ % (option, id)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    finally:
        # Robustness: always release the connection, even if the error
        # handler raises
        cur.close()
        con.close()
def delete_option(id):
    """Delete saved option row *id*; return True on success, None on error."""
    con, cur = get_cur()
    sql = """ delete from options where id = %s """ % (id)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    else:
        return True
    finally:
        # Bug fix: close() calls were unreachable after the return above
        cur.close()
        con.close()
def insert_new_savedserver(server, description, group):
    """Insert a saved server for *group*; return True on success, None on failure."""
    con, cur = get_cur()
    sql = """insert into saved_servers(`server`, `description`, `groups`) values ('%s', '%s', '%s') """ % (server, description, group)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    else:
        return True
    finally:
        # Bug fix: close() calls were unreachable after the return above
        cur.close()
        con.close()
def select_saved_servers(**kwargs):
    """Return saved-server rows; filter by exact server, or by group + term prefix; None on error."""
    con, cur = get_cur()
    sql = """select * from saved_servers """
    if kwargs.get('server'):
        sql = """select * from saved_servers where server = '%s' """ % kwargs.get('server')
    if kwargs.get('group'):
        # NOTE(review): relies on kwargs['term']; if callers omit it the
        # pattern becomes 'None%' — confirm all callers pass term
        sql = """select server,description from saved_servers where groups = '{}' and server like '{}%' """.format(kwargs.get('group'), kwargs.get('term'))
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    finally:
        # Bug fix: close() calls were unreachable after the return above
        cur.close()
        con.close()
def update_savedserver(server, description, id):
    """Rewrite the address and description of saved server row *id*."""
    con, cur = get_cur()
    sql = """ update saved_servers set
    server = '%s',
    description = '%s'
    where id = '%s' """ % (server, description, id)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    finally:
        # Robustness: always release the connection, even if the error
        # handler raises
        cur.close()
        con.close()
def delete_savedserver(id):
    """Delete saved server row *id*; return True on success, None on error."""
    con, cur = get_cur()
    sql = """ delete from saved_servers where id = %s """ % (id)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    else:
        return True
    finally:
        # Bug fix: close() calls were unreachable after the return above
        cur.close()
        con.close()
def insert_mentrics(serv, curr_con, cur_ssl_con, sess_rate, max_sess_rate):
    """Insert one metrics sample stamped with the current local time.

    NOTE(review): function name looks like a typo for insert_metrics; kept
    unchanged because callers use this spelling.
    """
    con, cur = get_cur()
    # MySQL and SQLite spell "current local timestamp" differently
    if mysql_enable == '1':
        sql = """ insert into metrics (serv, curr_con, cur_ssl_con, sess_rate, max_sess_rate, date) values('%s', '%s', '%s', '%s', '%s', now()) """ % (serv, curr_con, cur_ssl_con, sess_rate, max_sess_rate)
    else:
        sql = """ insert into metrics (serv, curr_con, cur_ssl_con, sess_rate, max_sess_rate, date) values('%s', '%s', '%s', '%s', '%s', datetime('now', 'localtime')) """ % (serv, curr_con, cur_ssl_con, sess_rate, max_sess_rate)
    try:
        cur.execute(sql)
        con.commit()
    except sqltool.Error as e:
        funct.out_error(e)
        con.rollback()
    finally:
        # Robustness: always release the connection, even if the error
        # handler raises
        cur.close()
        con.close()
def select_waf_metrics_enable(id):
    """Return rows with the WAF metrics flag for server id *id*; None on error."""
    con, cur = get_cur()
    sql = """ select waf.metrics from waf left join servers as serv on waf.server_id = serv.id where server_id = '%s' """ % id
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        return cur.fetchall()
    finally:
        # Bug fix: close() calls were unreachable after the return above
        cur.close()
        con.close()
def select_waf_metrics_enable_server(ip):
    """Return the WAF metrics flag for the server at *ip*; None if no row or on error."""
    con, cur = get_cur()
    sql = """ select waf.metrics from waf left join servers as serv on waf.server_id = serv.id where ip = '%s' """ % ip
    try:
        cur.execute(sql)
    except sqltool.Error as e:
        funct.out_error(e)
    else:
        for enable in cur.fetchall():
            # first row's flag; falls through to None when no rows
            return enable[0]
    finally:
        # Bug fix: close() calls were unreachable when a row was found
        cur.close()
        con.close()
def select_waf_servers(serv):
con, cur = get_cur()
sql = """ select serv.ip from waf left join | |
style.A.update({'type':'text/css'})
>>> html = style.render()
>>> a = '<style type="text/css">body { margin:4px; } p { color:blue; }</style>'
>>> b = '<style type="text/css">p { color:blue; } body { margin:4px; }</style>'
>>> html in (a,b) # order is indeterminate, so test both ways
True
>>> Style(body=dict(_moz_style_='foo')).render()
'<style>body { -moz-style:foo; }</style>'
"""
## convert selectors
_ = convertAttrKeys(content)
newcontent = {}
for k, v in _.items():
newcontent[k] = convertAttrKeys(v)
return HtmlElement('style', {}, newcontent)
#######################################################################
## Content Sectioning
## TODO hgroup
#######################################################################
def Address(*content, **attrs):
    """Build an address element.

    >>> Address().render()
    '<address></address>'
    """
    node = KWElement('address', *content, **attrs)
    return node
def Article(*content, **attrs):
    """Build an article element.

    >>> Article().render()
    '<article></article>'
    """
    node = KWElement('article', *content, **attrs)
    return node
def Aside(*content, **attrs):
    """Build an aside element.

    >>> Aside().render()
    '<aside></aside>'
    """
    node = KWElement('aside', *content, **attrs)
    return node
def Footer(*content, **attrs):
    """Build a footer element.

    >>> Footer().render()
    '<footer></footer>'
    """
    node = KWElement('footer', *content, **attrs)
    return node
def Header(*content, **attrs):
    """Build a header element.

    >>> Header().render()
    '<header></header>'
    """
    node = KWElement('header', *content, **attrs)
    return node
def H1(*content, **attrs):
    """Build an h1 heading element.

    >>> H1().render()
    '<h1></h1>'
    """
    node = KWElement('h1', *content, **attrs)
    return node
def H2(*content, **attrs):
    """Build an h2 heading element.

    >>> H2().render()
    '<h2></h2>'
    """
    node = KWElement('h2', *content, **attrs)
    return node
def H3(*content, **attrs):
    """Build an h3 heading element.

    >>> H3().render()
    '<h3></h3>'
    """
    node = KWElement('h3', *content, **attrs)
    return node
def H4(*content, **attrs):
    """Build an h4 heading element.

    >>> H4().render()
    '<h4></h4>'
    """
    node = KWElement('h4', *content, **attrs)
    return node
def H5(*content, **attrs):
    """Build an h5 heading element.

    >>> H5().render()
    '<h5></h5>'
    """
    node = KWElement('h5', *content, **attrs)
    return node
def H6(*content, **attrs):
    """Build an h6 heading element.

    >>> H6().render()
    '<h6></h6>'
    """
    node = KWElement('h6', *content, **attrs)
    return node
def Nav(*content, **attrs):
    """Build a nav element.

    >>> Nav().render()
    '<nav></nav>'
    """
    node = KWElement('nav', *content, **attrs)
    return node
def Section(*content, **attrs):
    """Build a section element.

    >>> Section().render()
    '<section></section>'
    """
    node = KWElement('section', *content, **attrs)
    return node
#######################################################################
## Text Content
#######################################################################
def Blockquote(*content, **attrs):
    """Build a blockquote element.

    >>> Blockquote().render()
    '<blockquote></blockquote>'
    """
    node = KWElement('blockquote', *content, **attrs)
    return node
def Dd(*content, **attrs):
    """Build a dd (description definition) element.

    >>> Dd().render()
    '<dd></dd>'
    """
    node = KWElement('dd', *content, **attrs)
    return node
def Div(*content, **attrs):
    """Build a div element.

    >>> Div().render()
    '<div></div>'
    """
    node = KWElement('div', *content, **attrs)
    return node
def Dl(*content, **attrs):
    """Build a dl (description list) element.

    >>> Dl().render()
    '<dl></dl>'
    """
    node = KWElement('dl', *content, **attrs)
    return node
def Dt(*content, **attrs):
    """Build a dt (description term) element.

    >>> Dt().render()
    '<dt></dt>'
    """
    node = KWElement('dt', *content, **attrs)
    return node
def Figcaption(*content, **attrs):
    """Build a figcaption element.

    >>> Figcaption().render()
    '<figcaption></figcaption>'
    """
    node = KWElement('figcaption', *content, **attrs)
    return node
def Figure(*content, **attrs):
    """Build a figure element.

    >>> Figure().render()
    '<figure></figure>'
    """
    node = KWElement('figure', *content, **attrs)
    return node
def Hr(**attrs):
    """Build a void hr element.

    >>> Hr().render()
    '<hr>'
    """
    node = KWElement('hr', None, **attrs)
    return node
def Li(*content, **attrs):
    """Build an li (list item) element.

    >>> Li().render()
    '<li></li>'
    """
    node = KWElement('li', *content, **attrs)
    return node
def Main(*content, **attrs):
    """Build a main element.

    >>> Main().render()
    '<main></main>'
    """
    node = KWElement('main', *content, **attrs)
    return node
def Ol(*content, **attrs):
    """Build an ol (ordered list) element.

    >>> Ol().render()
    '<ol></ol>'
    """
    node = KWElement('ol', *content, **attrs)
    return node
def P(*content, **attrs):
    """Build a p (paragraph) element.

    >>> P().render()
    '<p></p>'
    """
    node = KWElement('p', *content, **attrs)
    return node
def Pre(*content, **attrs):
    """Build a pre (preformatted text) element.

    >>> Pre().render()
    '<pre></pre>'
    """
    node = KWElement('pre', *content, **attrs)
    return node
def Ul(*content, **attrs):
    """Build a ul (unordered list) element.

    >>> Ul().render()
    '<ul></ul>'
    """
    node = KWElement('ul', *content, **attrs)
    return node
#######################################################################
## Inline Text Semantics
## TODO abbr, bdi, bdo, data, dfn, kbd, mark, q, rp, rt, rtc, ruby,
## time, var, wbr
#######################################################################
def A(*content, **attrs):
    """Build an a (anchor) element.

    >>> A("Example", href="https://example.com").render()
    '<a href="https://example.com">Example</a>'
    """
    node = KWElement('a', *content, **attrs)
    return node
def B(*content, **attrs):
    """Build a b (bold) element.

    >>> B().render()
    '<b></b>'
    """
    node = KWElement('b', *content, **attrs)
    return node
def Br(**attrs):
    """Build a void br (line break) element.

    >>> Br().render()
    '<br>'
    """
    node = KWElement('br', None, **attrs)
    return node
def Cite(*content, **attrs):
    """Build a cite element.

    >>> Cite().render()
    '<cite></cite>'
    """
    node = KWElement('cite', *content, **attrs)
    return node
def Code(*content, **attrs):
    """Build a code element.

    >>> Code().render()
    '<code></code>'
    """
    node = KWElement('code', *content, **attrs)
    return node
def Em(*content, **attrs):
    """Build an em (emphasis) element.

    >>> Em().render()
    '<em></em>'
    """
    node = KWElement('em', *content, **attrs)
    return node
def I(*content, **attrs):
    """Build an i (italic) element.

    >>> I().render()
    '<i></i>'
    """
    node = KWElement('i', *content, **attrs)
    return node
def S(*content, **attrs):
    """Build an s (strikethrough) element.

    >>> S().render()
    '<s></s>'
    """
    node = KWElement('s', *content, **attrs)
    return node
def Samp(*content, **attrs):
    """Build a samp (sample output) element.

    >>> Samp().render()
    '<samp></samp>'
    """
    node = KWElement('samp', *content, **attrs)
    return node
def Small(*content, **attrs):
    """Build a small element.

    >>> Small().render()
    '<small></small>'
    """
    node = KWElement('small', *content, **attrs)
    return node
def Span(*content, **attrs):
    """Build a span element.

    >>> Span().render()
    '<span></span>'
    """
    node = KWElement('span', *content, **attrs)
    return node
def Strong(*content, **attrs):
    """Build a strong element.

    >>> Strong().render()
    '<strong></strong>'
    """
    node = KWElement('strong', *content, **attrs)
    return node
def Sub(*content, **attrs):
    """Build a sub (subscript) element.

    >>> Sub().render()
    '<sub></sub>'
    """
    node = KWElement('sub', *content, **attrs)
    return node
def Sup(*content, **attrs):
    """Build a sup (superscript) element.

    >>> Sup().render()
    '<sup></sup>'
    """
    node = KWElement('sup', *content, **attrs)
    return node
def U(*content, **attrs):
    """Build a u (underline) element.

    >>> U().render()
    '<u></u>'
    """
    node = KWElement('u', *content, **attrs)
    return node
#######################################################################
## Image and Multimedia
#######################################################################
def Area(**attrs):
    """Build a void area element.

    >>> Area().render()
    '<area>'
    """
    node = KWElement('area', None, **attrs)
    return node
def Audio(*content, **attrs):
    """Build an audio element.

    >>> Audio().render()
    '<audio></audio>'
    """
    node = KWElement('audio', *content, **attrs)
    return node
def Img(**attrs):
    """Build a void img element.

    >>> Img().render()
    '<img>'
    """
    node = KWElement('img', None, **attrs)
    return node
def Map(*content, **attrs):
    """Build a map (image map) element.

    >>> Map().render()
    '<map></map>'
    """
    node = KWElement('map', *content, **attrs)
    return node
def Track(**attrs):
    """Build a void track element.

    >>> Track().render()
    '<track>'
    """
    node = KWElement('track', None, **attrs)
    return node
def Video(*content, **attrs):
    """Build a video element.

    >>> Video().render()
    '<video></video>'
    """
    node = KWElement('video', *content, **attrs)
    return node
#######################################################################
## Embedded Content
#######################################################################
def Embed(**attrs):
    """Build a void embed element.

    >>> Embed().render()
    '<embed>'
    """
    node = KWElement('embed', None, **attrs)
    return node
def Object(*content, **attrs):
    """Build an object element.

    >>> Object().render()
    '<object></object>'
    """
    node = KWElement('object', *content, **attrs)
    return node
def Param(**attrs):
    """Build a void param element.

    >>> Param().render()
    '<param>'
    """
    node = KWElement('param', None, **attrs)
    return node
def Source(**attrs):
    """Build a void source element.

    >>> Source().render()
    '<source>'
    """
    node = KWElement('source', None, **attrs)
    return node
#######################################################################
## Scripting
#######################################################################
def Canvas(*content, **attrs):
    """Build a canvas element.

    >>> Canvas().render()
    '<canvas></canvas>'
    """
    node = KWElement('canvas', *content, **attrs)
    return node
def Noscript(*content, **attrs):
    """Build a noscript element.

    >>> Noscript().render()
    '<noscript></noscript>'
    """
    node = KWElement('noscript', *content, **attrs)
    return node
def Script(*content, **attrs):
    """Build a script element.

    >>> Script().render()
    '<script></script>'
    """
    node = KWElement('script', *content, **attrs)
    return node
#######################################################################
## Demarcating Edits
## TODO del, ins
#######################################################################
#######################################################################
## Table Content
## TODO colgroup (maybe. It's poorly supported.)
#######################################################################
def Caption(*content, **attrs):
    """Build a caption element.

    >>> Caption().render()
    '<caption></caption>'
    """
    node = KWElement('caption', *content, **attrs)
    return node
def Col(**attrs):
    """Build a void col element.

    >>> Col().render()
    '<col>'
    """
    node = KWElement('col', None, **attrs)
    return node
def Table(*content, **attrs):
    """Build a table element.

    >>> Table().render()
    '<table></table>'
    """
    node = KWElement('table', *content, **attrs)
    return node
def Tbody(*content, **attrs):
    """Build a tbody element.

    >>> Tbody().render()
    '<tbody></tbody>'
    """
    node = KWElement('tbody', *content, **attrs)
    return node
def Td(*content, **attrs):
    """Build a td (table cell) element.

    >>> Td().render()
    '<td></td>'
    """
    node = KWElement('td', *content, **attrs)
    return node
def Tfoot(*content, **attrs):
    """Build a tfoot element.

    >>> Tfoot().render()
    '<tfoot></tfoot>'
    """
    node = KWElement('tfoot', *content, **attrs)
    return node
def Th(*content, **attrs):
    """Build a th (table header cell) element.

    >>> Th().render()
    '<th></th>'
    """
    node = KWElement('th', *content, **attrs)
    return node
def Thead(*content, **attrs):
    """Build a thead element.

    >>> Thead().render()
    '<thead></thead>'
    """
    node = KWElement('thead', *content, **attrs)
    return node
def Tr(*content, **attrs):
    """Build a tr (table row) element.

    >>> Tr().render()
    '<tr></tr>'
    """
    node = KWElement('tr', *content, **attrs)
    return node
#######################################################################
## Forms
#######################################################################
def Button(*content, **attrs):
    """Build a button element.

    >>> Button().render()
    '<button></button>'
    """
    node = KWElement('button', *content, **attrs)
    return node
def Datalist(*content, **attrs):
    """Build a datalist element.

    >>> Datalist().render()
    '<datalist></datalist>'
    """
    node = KWElement('datalist', *content, **attrs)
    return node
def Fieldset(*content, **attrs):
    """Build a fieldset element.

    >>> Fieldset().render()
    '<fieldset></fieldset>'
    """
    node = KWElement('fieldset', *content, **attrs)
    return node
def Form(*content, **attrs):
    """Build a form element.

    >>> Form().render()
    '<form></form>'
    """
    node = KWElement('form', *content, **attrs)
    return node
def Input(**attrs):
    """Build a void input element.

    >>> Input().render()
    '<input>'
    """
    node = KWElement('input', None, **attrs)
    return node
def Label(*content, **attrs):
    """Build a label element.

    >>> Label().render()
    '<label></label>'
    """
    node = KWElement('label', *content, **attrs)
    return node
def Legend(*content, **attrs):
    """Build a legend element.

    >>> Legend().render()
    '<legend></legend>'
    """
    node = KWElement('legend', *content, **attrs)
    return node
def Meter(*content, **attrs):
    """Build a meter element.

    >>> Meter().render()
    '<meter></meter>'
    """
    node = KWElement('meter', *content, **attrs)
    return node
def Optgroup(*content, **attrs):
    """Build an optgroup element.

    >>> Optgroup().render()
    '<optgroup></optgroup>'
    """
    node = KWElement('optgroup', *content, **attrs)
    return node
def Option(*content, **attrs):
    """Build an option element.

    >>> Option().render()
    '<option></option>'
    """
    node = KWElement('option', *content, **attrs)
    return node
def Output(*content, **attrs):
    """Build an output element.

    >>> Output().render()
    '<output></output>'
    """
    node = KWElement('output', *content, **attrs)
    return node
def Progress(*content, **attrs):
    """Build a progress element.

    >>> Progress().render()
    '<progress></progress>'
    """
    node = KWElement('progress', *content, **attrs)
    return node
def Select(*content, **attrs):
    """Build a select element.

    >>> Select().render()
    '<select></select>'
    """
    node = KWElement('select', *content, **attrs)
    return node
def Textarea(*content, **attrs):
    """Build a textarea element.

    >>> Textarea().render()
    '<textarea></textarea>'
    """
    node = KWElement('textarea', *content, **attrs)
    return node
#######################################################################
## HTML parser
#######################################################################
# Global | |
import base64
import json
import uuid
from abc import ABCMeta, abstractmethod # Abstract Base Class
import logging
from typing import List, Union, Dict, Any
from nexinfosys import Issue, IssuesOutputPairType
from nexinfosys.common.helper import create_dictionary, PartialRetrievalDictionary
logger = logging.getLogger(__name__)
class IExecutableCommand(metaclass=ABCMeta):
    """A command prepared for its execution. Commands have direct access to the current STATE."""

    @abstractmethod
    def execute(self, state: "State") -> IssuesOutputPairType:
        """
        Execute the command, producing a list of issues and an output.
        Syntactic errors are assumed to have been handled before this point.

        :param state: the current State the command may read or alter
        :return: (list of issues, output)
        """
        raise Exception("Execute not implemented")

    @abstractmethod
    def estimate_execution_time(self):
        pass

    @abstractmethod
    def json_serialize(self) -> Dict:
        pass

    def json_deserialize(self, json_input: Union[dict, str, bytes, bytearray]) -> List[Issue]:
        """
        Load command parameters from JSON and check the validity of the input;
        afterwards the command is ready for "execute".

        :param json_input: a dict, or JSON as Unicode text / bytes
        :return: list of issues found while reading the input
        """
        self._content = json_input if isinstance(json_input, dict) else json.loads(json_input)
        return []
class Scope:
    """Assigns names to entities via a registry; scopes are stacked inside a Namespace."""

    def __init__(self, name=None):
        self._name = name  # A name for the scope itself
        self._registry = create_dictionary()

    @property
    def name(self):
        """Name of the scope itself."""
        return self._name

    @name.setter
    def name(self, name):
        self._name = name

    def __contains__(self, key):
        # "in" operator: is the key registered in this scope?
        return key in self._registry

    def __getitem__(self, name):
        """Return the entity registered under *name*, or None if absent."""
        if name in self._registry:
            return self._registry[name]
        else:
            return None

    def __setitem__(self, name: str, entity):
        """Register *entity* under *name*; return True if the name already existed.

        Bug fix: the flag was inverted (it reported True when the name was
        NOT yet present).
        """
        existing = name in self._registry
        self._registry[name] = entity
        return existing

    def __delitem__(self, name):
        del self._registry[name]

    def list(self):
        """ List just the names of variables """
        return self._registry.keys()

    def list_pairs(self):
        """ List tuples of variable name and value object """
        return [(k, v2) for k, v2 in self._registry.items()]
class Namespace:
    """A stack of Scopes: names in the current (innermost) scope take
    precedence over the same names in older, outer scopes.

    Lookups walk from the most recent scope to the oldest; writes always go
    to the current scope.
    """
    def __init__(self):
        self.__scope = [] # Stack of Scope instances, oldest first
        self.__current_scope = None # type: Scope
        self.__current_scope_idx = -1  # index of the current scope in self.__scope
        self.new_scope()  # a Namespace always starts with one open scope
    # The registry will have "nested" scopes (names in the current scope take precedence on "higher" scopes)
    # When searching for names, the search will go from the most recent scope to the oldest
    def new_scope(self, name=None):
        """ Create a new scope """
        self.__current_scope = Scope()
        self.__scope.append(self.__current_scope)
        self.__current_scope_idx = len(self.__scope) - 1
        # Default name is derived from the scope's position in the stack
        if not name:
            name = "Scope" + str(self.__current_scope_idx)
        self.__current_scope.name = name
    def close_scope(self):
        # Pop the innermost scope (if any) and make the previous one current;
        # when the last scope is closed, there is no current scope.
        if self.__current_scope:
            del self.__scope[-1]
        if self.__current_scope_idx >= 0:
            self.__current_scope_idx -= 1
        if self.__current_scope_idx >= 0:
            self.__current_scope = self.__scope[-1]
        else:
            self.__current_scope = None
    def list_names(self, scope=None):
        """ Returns a list of the names of the registered entities of the "scope" or if None, of the CURRENT scope """
        if not scope:
            scope = self.__current_scope
        return scope.list()
    def list(self, scope=None):
        """
        Returns a list of the names and values of the registered entities of
        the "scope" or if None, of the CURRENT scope
        """
        if not scope:
            scope = self.__current_scope
        return scope.list_pairs()
    def list_all_names(self):
        """
        Returns a list of the names of registered entities considering the scopes
        Start from top level, end in bottom level (the current one, which takes precedence)
        :return:
        """
        # Merge oldest-to-newest so inner scopes overwrite outer ones
        t = create_dictionary()
        for scope in self.__scope:
            t.update(scope._registry)
        return t.keys()
    def list_all(self):
        """
        Returns a list of the names and variables of registered entities considering the scopes
        Start from top level, end in bottom level (the current one, which takes precedence)
        :return:
        """
        # Same merge as list_all_names, but returning (name, value) pairs
        t = create_dictionary()
        for scope in self.__scope:
            t.update(scope._registry)
        return [(k, v2) for k, v2 in t.items()]
    def set(self, name: str, entity):
        """ Set a named entity in the current scope. Previous scopes are not writable. """
        if self.__current_scope:
            # NOTE: var_exists is currently unused (the overwrite warning below is disabled)
            var_exists = name in self.__current_scope
            self.__current_scope[name] = entity
            # if var_exists:
            #     logger.warning("'" + name + "' overwritten.")
    def get(self, name: str, scope=None, return_scope=False):
        """ Return the entity named "name". Return also the Scope in which it was found """
        if not scope:
            # Search innermost-to-outermost; the for-else runs only when no
            # scope contained the name
            for scope_idx in range(len(self.__scope) - 1, -1, -1):
                if name in self.__scope[scope_idx]:
                    if return_scope:
                        return self.__scope[scope_idx][name], self.__scope[scope_idx]
                    else:
                        return self.__scope[scope_idx][name]
            else:
                # logger.warning(
                #     "The name '" + name + "' was not found in the stack of scopes (" + str(len(self.__scope)) + ")")
                if return_scope:
                    return None, None
                else:
                    return None
        else:
            # TODO Needs proper implementation !!!! (when scope is a string, not a Scope instance, to be searched in the list of scopes "self.__scope")
            if name in scope:
                if return_scope:
                    return scope[name], scope
                else:
                    return scope[name]
            else:
                logger.error("The name '" + name + "' was not found in scope '" + scope.name + "'")
                if return_scope:
                    return None, None
                else:
                    return None
class State:
    """
    -- "State" in memory --

    Commands may alter State or may just read it.
    A dictionary of named Namespaces (each Namespace can hold several scopes);
    one unnamed Namespace ("") is active by default, others are addressed by
    name. Serialization helpers live in the "serialization" module
    (serialize_state / deserialize_state).
    """
    def __init__(self, d: Dict[str, Any] = None):
        self._default_namespace = ""      # name of the namespace used when none is given
        self._namespaces = create_dictionary()  # name -> Namespace
        if d is not None and len(d) > 0:
            self.update(d)

    def new_namespace(self, name):
        """Create (or replace) the Namespace called *name*."""
        self._namespaces[name] = Namespace()
        # NOTE(review): _default_namespace is initialized to "" (never None),
        # so this branch never fires; kept as-is to preserve behavior.
        if self._default_namespace is None:
            self._default_namespace = name

    @property
    def default_namespace(self):
        return self._default_namespace

    @default_namespace.setter
    def default_namespace(self, name):
        if name is not None:  # Name has to have some value
            self._default_namespace = name

    def del_namespace(self, name):
        """Remove the Namespace *name* if present."""
        if name in self._namespaces:
            # Bug fix: previously `del self._namespaces` deleted the whole
            # registry attribute instead of the single entry.
            del self._namespaces[name]

    def list_namespaces(self):
        """Return the names of all namespaces."""
        return self._namespaces.keys()

    def list_namespace_variables(self, namespace_name=None):
        """Return (name, value) pairs of a namespace (default namespace if None)."""
        if namespace_name is None:
            namespace_name = self._default_namespace
        return self._namespaces[namespace_name].list_all()

    def update(self, d: Dict[str, Any], namespace_name=None):
        """Set every (name, entity) pair of *d* in a namespace, creating it if needed."""
        if namespace_name is None:
            namespace_name = self._default_namespace
        if namespace_name not in self._namespaces:
            self.new_namespace(namespace_name)
        for name, entity in d.items():
            self._namespaces[namespace_name].set(name, entity)

    def set(self, name, entity, namespace_name=None):
        """Set one named entity in a namespace, creating the namespace if needed."""
        if namespace_name is None:
            namespace_name = self._default_namespace
        if namespace_name not in self._namespaces:
            self.new_namespace(namespace_name)
        self._namespaces[namespace_name].set(name, entity)

    def get(self, name, namespace_name=None, scope=None):
        """Return the entity *name* from a namespace (default namespace if None)."""
        if not namespace_name:
            namespace_name = self._default_namespace
        if namespace_name not in self._namespaces:
            self.new_namespace(namespace_name)
        return self._namespaces[namespace_name].get(name, scope)
def get_case_study_registry_objects(state, namespace=None):
    """
    Obtain the main entries of the state, creating any that are missing.

    :param state: Input state (modified also)
    :param namespace: State supports several namespaces; None means the default one
    :return: Tuple: (global index, processor sets, hierarchies, datasets, mappings)
    """
    def _get_or_create(key, factory):
        # Fetch *key* from the state; lazily create and register it when absent
        obj = state.get(key, namespace)
        if not obj:
            obj = factory()
            state.set(key, obj, namespace)
        return obj

    glb_idx = _get_or_create("_glb_idx", PartialRetrievalDictionary)  # index of ALL objects
    p_sets = _get_or_create("_processor_sets", create_dictionary)     # dict of sets
    hh = _get_or_create("_hierarchies", create_dictionary)
    datasets = _get_or_create("_datasets", create_dictionary)
    mappings = _get_or_create("_mappings", create_dictionary)
    return glb_idx, p_sets, hh, datasets, mappings
class LocallyUniqueIDManager:
    """
    Singleton producing locally unique IDs: UUID1 bytes encoded in Ascii85,
    which is still ASCII but more compact than the UUID standard hexadecimal
    representation.
    """
    class __LocallyUniqueIDManager:
        def __init__(self, c: int = 0):
            self.val = c  # counter value shared through the singleton

        def get_new_id(self, inc):
            # 16 UUID bytes -> 20 Ascii85 characters
            return base64.a85encode(uuid.uuid1().bytes).decode("ascii")

        def __str__(self):
            # Bug fix: concatenating str with the int counter raised TypeError
            return repr(self) + str(self.val)

    instance = None  # the shared inner-singleton instance

    def __init__(self, arg=0):
        if not LocallyUniqueIDManager.instance:
            LocallyUniqueIDManager.instance = LocallyUniqueIDManager.__LocallyUniqueIDManager(arg)
        else:
            LocallyUniqueIDManager.instance.val = arg

    def get_new_id(self, inc: int = 1):
        return self.instance.get_new_id(inc)

    def __getattr__(self, name):
        # Delegate unknown attributes to the singleton instance
        return getattr(self.instance, name)
"""
API
* Open/close interactive session
* Identify
* Open a reproducible session (optionally load existing command_executors/state). Close it, optionally save.
* CRUD case studies, versions and variables (objects)
* Browse datasets
* Browse case study objects: mappings, hierarchies, grammars
* Submit Worksheet
- Interactive session
- Open work session
- Submit file
- the file produces a sequence of command_executors
- execute
- elaborate output file and compile issues
- Close Interactive session (save, new case study version)
- Close user session
* Submit R script
| |
# This file is part of PRAW.
#
# PRAW is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# PRAW is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# PRAW. If not, see <http://www.gnu.org/licenses/>.
"""
Helper functions.
The functions here provide functionality that is often needed by programs using
PRAW, but which isn't part of reddit's API.
"""
from __future__ import unicode_literals
import six
import sys
import time
from collections import deque
from functools import partial
from timeit import default_timer as timer
from praw.errors import HTTPException, PRAWException
from operator import attrgetter
BACKOFF_START = 4 # Minimum number of seconds to sleep during errors
KEEP_ITEMS = 128 # On each iteration only remember the first # items
# for conversion between broken reddit timestamps and unix timestamps
REDDIT_TIMESTAMP_OFFSET = 28800
def comment_stream(reddit_session, subreddit, limit=None, verbosity=1):
    """Indefinitely yield new comments from the provided subreddit.

    Comments are yielded from oldest to newest.

    :param reddit_session: The reddit_session to make requests from. In all
        the examples this is assigned to the variable ``r``.
    :param subreddit: Either a subreddit object, or the name of a subreddit.
        Use `all` to get the comment stream for all comments made to reddit.
    :param limit: The maximum number of comments to fetch in a single
        iteration. When None, fetch all available comments (reddit limits
        this to 1000, or a multiple of 1000 for multi-subreddits). If this
        number is too small, comments may be missed.
    :param verbosity: Controls output to stderr. <= 0: none; >= 1: totals
        and short-term rate; >= 2: notes on added delays; >= 3: debugging
        information about the stream. (Default: 1)
    """
    fetch = partial(reddit_session.get_comments, six.text_type(subreddit))
    return _stream_generator(fetch, limit, verbosity)
def submission_stream(reddit_session, subreddit, limit=None, verbosity=1):
    """Endlessly yield new submissions from ``subreddit``, oldest first.

    :param reddit_session: The session used to perform the requests (the
        variable ``r`` in most examples).
    :param subreddit: A subreddit object or subreddit name; pass ``all``
        to stream every submission made to reddit.
    :param limit: Maximum number of submissions fetched per iteration;
        when None, fetch everything reddit allows (1000, or a multiple
        of 1000 for multi-subreddits). Too small a value may miss
        submissions. Because r/all has no cap on retrievable
        submissions, None is coerced to 1000 there.
    :param verbosity: Controls output to stderr: <= 0 silent; >= 1 total
        submissions processed plus short-term rate; >= 2 notes on delays
        added to avoid unexpected http errors; >= 3 stream debugging
        information. (Default: 1)
    """
    # r/all needs an explicit cap; see the `limit` docs above.
    if limit is None and six.text_type(subreddit).lower() == "all":
        limit = 1000
    # Accept plain names as well as Subreddit objects.
    if not hasattr(subreddit, 'reddit_session'):
        subreddit = reddit_session.get_subreddit(subreddit)
    return _stream_generator(subreddit.get_new, limit, verbosity)
def valid_redditors(redditors, sub):
    """Return Redditor instances for the entries that are real accounts.

    :param redditors: An iterable of Redditor instances and/or strings
        to verify as actual redditor accounts.
    :param sub: A Subreddit instance on which the authenticated account
        has flair changing permission.

    Note: as a side effect, flair is unset on ``sub`` for every valid
    redditor in ``redditors``.
    """
    names = list({six.text_type(entry).lower() for entry in redditors})
    # set_flair_csv returns one response per submitted row, in order.
    responses = sub.set_flair_csv(
        ({'user': name, 'flair_text': name} for name in names))
    get_redditor = sub.reddit_session.get_redditor
    return [get_redditor(names[index], fetch=False)
            for index, response in enumerate(responses)
            if response['ok']]
def submissions_between(reddit_session,
subreddit,
lowest_timestamp=None,
highest_timestamp=None,
newest_first=True,
extra_cloudsearch_fields=None,
verbosity=1):
"""Yield submissions between two timestamps.
If both ``highest_timestamp`` and ``lowest_timestamp`` are unspecified,
yields all submissions in the ``subreddit``.
Submissions are yielded from newest to oldest(like in the "new" queue).
:param reddit_session: The reddit_session to make requests from. In all the
examples this is assigned to the variable ``r``.
:param subreddit: Either a subreddit object, or the name of a
subreddit. Use `all` to get the submissions stream for all submissions
made to reddit.
:param lowest_timestamp: The lower bound for the ``created_utc`` attribute of
submissions.
(Default: subreddit's created_utc or 0 when subreddit == "all").
:param highest_timestamp: The upper bound for ``created_utc`` attribute
of submissions. (Default: current unix time)
NOTE: both highest_timestamp and lowest_timestamp are proper
unix timestamps(just like ``created_utc`` attributes)
:param newest_first: If set to true, yields submissions
from newest to oldest. Otherwise yields submissions
from oldest to newest
:param extra_cloudsearch_fields: Allows extra filtering of results by
parameters like author, self. Full list is available here:
https://www.reddit.com/wiki/search
:param verbosity: A number that controls the amount of output produced to
stderr. <= 0: no output; >= 1: output the total number of submissions
processed; >= 2: output debugging information regarding
the search queries. (Default: 1)
"""
def debug(msg, level):
if verbosity >= level:
sys.stderr.write(msg + '\n')
def format_query_field(k, v):
if k in ["nsfw", "self"]:
# even though documentation lists "no" and "yes"
# as possible values, in reality they don't work
if v not in [0, 1, "0", "1"]:
raise PRAWException("Invalid value for the extra"
"field {}. Only '0' and '1' are"
"valid values.".format(k))
return "{}:{}".format(k, v)
return "{}:'{}'".format(k, v)
if extra_cloudsearch_fields is None:
extra_cloudsearch_fields = {}
extra_query_part = " ".join(
[format_query_field(k, v) for (k, v)
in sorted(extra_cloudsearch_fields.items())]
)
if highest_timestamp is None:
highest_timestamp = int(time.time()) + REDDIT_TIMESTAMP_OFFSET
else:
highest_timestamp = int(highest_timestamp) + REDDIT_TIMESTAMP_OFFSET
if lowest_timestamp is not None:
lowest_timestamp = int(lowest_timestamp) + REDDIT_TIMESTAMP_OFFSET
elif not isinstance(subreddit, six.string_types):
lowest_timestamp = int(subreddit.created)
elif subreddit not in ("all", "contrib", "mod", "friend"):
lowest_timestamp = int(reddit_session.get_subreddit(subreddit).created)
else:
lowest_timestamp = 0
original_highest_timestamp = highest_timestamp
original_lowest_timestamp = lowest_timestamp
# When making timestamp:X..Y queries, reddit misses submissions
# inside X..Y range, but they can be found inside Y..Z range
# It is not clear what the value of Z should be, but it seems
# like the difference is usually about ~1 hour or less
# To be sure, let's set the workaround offset to 2 hours
out_of_order_submissions_workaround_offset = 7200
highest_timestamp += out_of_order_submissions_workaround_offset
lowest_timestamp -= out_of_order_submissions_workaround_offset
# Those parameters work ok, but there may be a better set of parameters
window_size = 60 * 60
search_limit = 100
min_search_results_in_window = 50
window_adjustment_ratio = 1.25
backoff = BACKOFF_START
processed_submissions = 0
prev_win_increased = False
prev_win_decreased = False
while highest_timestamp >= lowest_timestamp:
try:
if newest_first:
t1 = max(highest_timestamp - window_size, lowest_timestamp)
t2 = highest_timestamp
else:
t1 = lowest_timestamp
t2 = min(lowest_timestamp + window_size, highest_timestamp)
search_query = 'timestamp:{}..{}'.format(t1, t2)
if extra_query_part:
search_query = "(and {} {})".format(search_query,
extra_query_part)
debug(search_query, 3)
search_results = list(reddit_session.search(search_query,
subreddit=subreddit,
limit=search_limit,
syntax='cloudsearch',
sort='new'))
debug("Received {0} search results for query {1}"
.format(len(search_results), search_query),
2)
backoff = BACKOFF_START
except HTTPException as exc:
debug("{0}. Sleeping for {1} seconds".format(exc, backoff), 2)
time.sleep(backoff)
backoff *= 2
continue
if len(search_results) >= search_limit:
power = 2 if prev_win_decreased else 1
window_size = int(window_size / window_adjustment_ratio**power)
prev_win_decreased = True
debug("Decreasing window size to {0} seconds".format(window_size),
2)
# Since it is possible that there are more submissions
# in the current window, we have to re-do the request
# with reduced window
continue
else:
prev_win_decreased = False
search_results = [s for s in search_results
if original_lowest_timestamp <= s.created and
s.created <= original_highest_timestamp]
for submission in sorted(search_results,
key=attrgetter('created_utc', 'id'),
reverse=newest_first):
yield submission
processed_submissions += len(search_results)
debug('Total processed submissions: {}'
.format(processed_submissions), 1)
if newest_first:
highest_timestamp -= (window_size + 1)
else:
lowest_timestamp += (window_size + 1)
if len(search_results) < | |
the Android '
'NDK, not that you don\'t have a normal compiler '
'installed. Exiting.')
exit(1)
env['CC'] = '{toolchain_prefix}-gcc {cflags}'.format(
toolchain_prefix=toolchain_prefix,
cflags=env['CFLAGS'])
env['CXX'] = '{toolchain_prefix}-g++ {cxxflags}'.format(
toolchain_prefix=toolchain_prefix,
cxxflags=env['CXXFLAGS'])
env['AR'] = '{}-ar'.format(toolchain_prefix)
env['RANLIB'] = '{}-ranlib'.format(toolchain_prefix)
env['LD'] = '{}-ld'.format(toolchain_prefix)
env['STRIP'] = '{}-strip --strip-unneeded'.format(toolchain_prefix)
env['MAKE'] = 'make -j5'
env['READELF'] = '{}-readelf'.format(toolchain_prefix)
hostpython_recipe = Recipe.get_recipe('hostpython2', self.ctx)
# AND: This hardcodes python version 2.7, needs fixing
# AND: This also hardcodes armeabi, which isn't even correct,
# don't forget to fix!
env['BUILDLIB_PATH'] = join(
hostpython_recipe.get_build_dir('armeabi'),
'build', 'lib.linux-{}-2.7'.format(uname()[-1]))
env['PATH'] = environ['PATH']
# AND: This stuff is set elsewhere in distribute.sh. Does that matter?
env['ARCH'] = self.arch
# env['LIBLINK_PATH'] = join(
# self.ctx.build_dir, 'other_builds', 'objects')
# ensure_dir(env['LIBLINK_PATH']) # AND: This should be elsewhere
return env
class ArchAndroid(Arch):
    """Arch subclass targeting the 'armeabi' Android ABI."""

    arch = "armeabi"
# class ArchSimulator(Arch):
# sdk = "iphonesimulator"
# arch = "i386"
# triple = "i386-apple-darwin11"
# version_min = "-miphoneos-version-min=6.0.0"
# sysroot = sh.xcrun("--sdk", "iphonesimulator", "--show-sdk-path").strip()
# class Arch64Simulator(Arch):
# sdk = "iphonesimulator"
# arch = "x86_64"
# triple = "x86_64-apple-darwin13"
# version_min = "-miphoneos-version-min=7.0"
# sysroot = sh.xcrun("--sdk", "iphonesimulator", "--show-sdk-path").strip()
# class ArchIOS(Arch):
# sdk = "iphoneos"
# arch = "armv7"
# triple = "arm-apple-darwin11"
# version_min = "-miphoneos-version-min=6.0.0"
# sysroot = sh.xcrun("--sdk", "iphoneos", "--show-sdk-path").strip()
# class Arch64IOS(Arch):
# sdk = "iphoneos"
# arch = "arm64"
# triple = "aarch64-apple-darwin13"
# version_min = "-miphoneos-version-min=7.0"
# sysroot = sh.xcrun("--sdk", "iphoneos", "--show-sdk-path").strip()
class Graph(object):
    # Taken from the old python-for-android/depsort.
    # Modified to include alternative dependencies.
    """A set of candidate dependency graphs.

    Because a dependency entry may list alternatives (a tuple/list such
    as ``('python2', 'python3')``), several possible graphs are tracked
    in parallel in ``self.graphs``.  Each graph is a dict that maps a
    package name to the set of its dependencies.
    """

    def __init__(self):
        # Each entry maps a package name to the set of its dependencies.
        self.graphs = [{}]

    def remove_redundant_graphs(self):
        '''Removes possible graphs if they are equivalent to others.

        Two graphs are considered equivalent when they contain the same
        set of package names.  The list is walked backwards so popping
        an element never disturbs indices still to be visited.  The
        previous implementation compared indices computed up front,
        which made a graph compare equal to itself (spuriously removing
        it) and could index out of range after a pop.
        '''
        graphs = self.graphs
        # Graph 0 needs no explicit pass of its own: by the time we
        # would reach it, every later graph has been compared against it.
        for i in range(len(graphs) - 1, 0, -1):
            current = graphs[i]
            for comparison_graph in graphs[:i]:
                if set(comparison_graph.keys()) == set(current.keys()):
                    graphs.pop(i)
                    break

    def add(self, dependent, dependency):
        """Add a dependency relationship to the graph.

        ``dependency`` may be a single name or a tuple/list of
        alternatives.  For alternatives, every existing candidate graph
        is forked once per extra alternative, so each candidate ends up
        containing exactly one of them.
        """
        if isinstance(dependency, (tuple, list)):
            for graph in self.graphs[:]:
                for dep in dependency[1:]:
                    new_graph = deepcopy(graph)
                    self._add(new_graph, dependent, dep)
                    self.graphs.append(new_graph)
                self._add(graph, dependent, dependency[0])
        else:
            for graph in self.graphs:
                self._add(graph, dependent, dependency)
        self.remove_redundant_graphs()

    def _add(self, graph, dependent, dependency):
        '''Add a dependency relationship to a specific graph, where
        dependency must be a single dependency, not a list or tuple.
        '''
        graph.setdefault(dependent, set())
        graph.setdefault(dependency, set())
        if dependent != dependency:
            graph[dependent].add(dependency)

    def conflicts(self, conflict):
        """Drop every candidate graph that contains ``conflict``.

        Returns True when no candidate graphs remain.  Iterates
        backwards so that a pop cannot cause the next element to be
        skipped (the previous forward-index arithmetic could skip graphs
        once one had been removed).
        """
        graphs = self.graphs
        for i in range(len(graphs) - 1, -1, -1):
            if conflict in graphs[i]:
                graphs.pop(i)
        return len(graphs) == 0

    def remove_remaining_conflicts(self, ctx):
        # It's unpleasant to have to pass ctx as an argument...
        '''Checks all possible graphs for conflicts that have arisen
        during the addition of alternative recipe branches, as these are
        not checked for conflicts at the time.'''
        new_graphs = []
        for graph in self.graphs:
            for name in graph:
                recipe = Recipe.get_recipe(name, ctx)
                if any(c in graph for c in recipe.conflicts):
                    break
            else:
                new_graphs.append(graph)
        self.graphs = new_graphs

    def add_optional(self, dependent, dependency):
        """Add an optional (ordering only) dependency relationship to the graph.

        Only call this after all mandatory requirements are added.
        """
        for graph in self.graphs:
            if dependent in graph and dependency in graph:
                self._add(graph, dependent, dependency)

    def find_order(self, index=0):
        """Do a topological sort on the dependency graph at ``index``.

        :Parameters:
            index : int
                Which of the candidate graphs to sort.
        :Returns:
            iterator, sorted items from first to last
        :raises ValueError: if the graph contains a dependency cycle
        """
        # Work on a copy so the stored graph is not consumed.
        graph = self.graphs[index]
        graph = dict((k, set(v)) for k, v in graph.items())
        while graph:
            # Find all items without a parent
            leftmost = [l for l, s in graph.items() if not s]
            if not leftmost:
                raise ValueError('Dependency cycle detected! %s' % graph)
            # If there is more than one, sort them for predictable order
            leftmost.sort()
            for result in leftmost:
                # Yield and remove them from the graph
                yield result
                graph.pop(result)
                for bset in graph.values():
                    bset.discard(result)
class Context(object):
    '''A build context. If anything will be built, an instance of this
    class will be instantiated and used to hold all the build state.'''

    # Snapshot of the process environment, taken at class-definition time.
    env = environ.copy()
    root_dir = None  # the filepath of toolchain.py
    storage_dir = None  # the root dir where builds and dists will be stored
    build_dir = None  # in which bootstraps are copied for building and recipes are built
    dist_dir = None  # the Android project folder where everything ends up
    libs_dir = None  # where Android libs are cached after build but
    # before being placed in dists
    # NOTE(review): libs_dir, aars_dir and javaclass_dir above/below are
    # shadowed by same-named properties defined later in this class.
    aars_dir = None  # where .aar files are collected (see aars_dir property)
    javaclass_dir = None  # where compiled java classes go (see property)
    ccache = None  # whether to use ccache
    cython = None  # the cython interpreter name
    ndk_platform = None  # the ndk platform directory
    dist_name = None  # should be deprecated in favour of self.dist.dist_name
    bootstrap = None  # presumably set during dist preparation - not visible here
    bootstrap_build_dir = None
    recipe_build_order = None  # Will hold the list of all built recipes
    @property
    def packages_path(self):
        '''Where packages are downloaded before being unpacked.'''
        return join(self.storage_dir, 'packages')
    @property
    def templates_dir(self):
        '''Directory of project templates, under the toolchain root dir.'''
        return join(self.root_dir, 'templates')
@property
def libs_dir(self):
# Was previously hardcoded as self.build_dir/libs
dir = join(self.build_dir, 'libs_collections',
self.bootstrap.distribution.name)
ensure_dir(dir)
return dir
@property
def javaclass_dir(self):
# Was previously hardcoded as self.build_dir/java
dir = join(self.build_dir, 'javaclasses',
self.bootstrap.distribution.name)
ensure_dir(dir)
return dir
@property
def aars_dir(self):
dir = join(self.build_dir, 'aars', self.bootstrap.distribution.name)
ensure_dir(dir)
return dir
@property
def python_installs_dir(self):
dir = join(self.build_dir, 'python-installs')
ensure_dir(dir)
return dir
def get_python_install_dir(self):
dir = join(self.python_installs_dir, self.bootstrap.distribution.name)
return dir
def setup_dirs(self):
'''Calculates all the storage and build dirs, and makes sure
the directories exist where necessary.'''
self.root_dir = realpath(dirname(__file__))
# AND: TODO: Allow the user to set the build_dir
self.storage_dir = user_data_dir('python-for-android')
self.build_dir = join(self.storage_dir, 'build')
self.dist_dir = join(self.storage_dir, 'dists')
ensure_dir(self.storage_dir)
ensure_dir(self.build_dir)
ensure_dir(self.dist_dir)
    @property
    def android_api(self):
        '''The Android API being targeted.'''
        # Guard against use before prepare_build_environment (or similar)
        # has filled the value in.
        if self._android_api is None:
            raise ValueError('Tried to access android_api but it has not '
                             'been set - this should not happen, something '
                             'went wrong!')
        return self._android_api

    @android_api.setter
    def android_api(self, value):
        # Plain pass-through; _android_api is presumably initialised to
        # None elsewhere (not visible in this chunk).
        self._android_api = value
@property
def ndk_ver(self):
'''The version of the NDK being used for compilation.'''
if self._ndk_ver is None:
raise ValueError('Tried to access android_api but it has not '
'been set - this should not happen, something '
'went wrong!')
return self._ndk_ver
@ndk_ver.setter
def ndk_ver(self, value):
self._ndk_ver = value
@property
def sdk_dir(self):
'''The path to the Android SDK.'''
if self._sdk_dir is None:
raise ValueError('Tried to access android_api but it has not '
'been set - this should not happen, something '
'went wrong!')
return self._sdk_dir
@sdk_dir.setter
def sdk_dir(self, value):
self._sdk_dir = value
@property
def ndk_dir(self):
'''The path to the Android NDK.'''
if self._ndk_dir is None:
raise ValueError('Tried to access android_api but it has not '
'been set - this should not happen, something '
'went wrong!')
return self._ndk_dir
@ndk_dir.setter
def ndk_dir(self, value):
self._ndk_dir = value
def prepare_build_environment(self, user_sdk_dir, user_ndk_dir,
user_android_api, user_ndk_ver):
'''Checks that build dependencies exist and sets internal variables
for the Android SDK etc.
..warning:: This *must* be called before trying any build stuff
'''
if self._build_env_prepared:
return
# AND: This needs revamping to carefully check each dependency
# in turn
ok = True
# Work out where the Android SDK is
sdk_dir = None
if user_sdk_dir:
sdk_dir = user_sdk_dir
if sdk_dir is None: # This is the old P4A-specific var
sdk_dir = environ.get('ANDROIDSDK', None)
if sdk_dir is None: # This seems used more conventionally
sdk_dir = environ.get('ANDROID_HOME', None)
if sdk_dir is None: # Checks in the buildozer SDK dir, useful
# # for debug tests of p4a
possible_dirs = glob.glob(expanduser(join(
'~', '.buildozer', 'android', 'platform', 'android-sdk-*')))
if possible_dirs:
info('Found possible SDK dirs in buildozer dir: {}'.format(
', '.join([d.split(os.sep)[-1] for d in possible_dirs])))
info('Will attempt to use SDK at {}'.format(possible_dirs[0]))
warning('This SDK lookup is intended for debug only, if you '
'use python-for-android much you should probably '
'maintain your own SDK download.')
sdk_dir = possible_dirs[0]
if sdk_dir is None:
warning('Android SDK dir was not specified, exiting.')
exit(1)
self.sdk_dir = realpath(sdk_dir)
# Check what Android API we're using
android_api = None
if user_android_api:
android_api = user_android_api
if android_api is not None:
info('Getting Android API version from user argument')
if android_api is | |
try:
iwd = Path(job_ad["iwd"])
except KeyError:
raise KeyError("Could not find iwd in job ad.")
return iwd / container_image
def annex_inner_func(
logger,
annex_name,
nodes,
lifetime,
allocation,
queue_at_machine,
owners,
collector,
token_file,
password_file,
ssh_target,
control_path,
cpus,
mem_mb,
):
if '@' in queue_at_machine:
(queue_name, target) = queue_at_machine.split('@', 1)
else:
error_string = "Target must have the form queue@machine."
target = queue_at_machine.casefold()
if target not in MACHINE_TABLE:
error_string = f"{error_string} Also, '{queue_at_machine}' is not a known machine."
else:
default_queue = MACHINE_TABLE[target]['default_queue']
queue_list = "\n ".join([q for q in MACHINE_TABLE[target]['queues']])
error_string = f"{error_string} Supported queues are:\n {queue_list}\nUse '{default_queue}' if you're not sure."
raise ValueError(error_string)
#
# We'll need to validate the requested nodes (or CPUs and memory)
# against the requested queue[-machine pair]. We also need to
# check the lifetime (and idle time, once we support it). Once
# all of that's been validated, we should print something like:
#
# Once established by Stampede 2, the annex will be available to
# run your jobs for no more than 48 hours (use --duration to change).
# It will self-destruct after 20 minutes of inactivity (use
# --idle to change).
#
# .. although it should maybe also include the queue name and size.
#
# We use this same method to determine the user name in `htcondor job`,
# so even if it's wrong, it will at least be consistently so.
username = getpass.getuser()
target = target.casefold()
if target not in MACHINE_TABLE:
raise ValueError(f"{target} is not a known machine.")
# Location of the local universe script files
local_script_dir = (
Path(htcondor.param.get("LIBEXEC", "/usr/libexec/condor")) / "annex"
)
if not local_script_dir.is_dir():
raise RuntimeError(f"Annex script dir {local_script_dir} not found or not a directory.")
token_file = Path(token_file).expanduser()
if not token_file.exists():
raise RuntimeError(f"Token file {token_file} doesn't exist.")
control_path = Path(control_path).expanduser()
if control_path.is_dir():
if not control_path.exists():
logger.debug(f"{control_path} not found, attempt to create it")
control_path.mkdir(parents=True, exist_ok=True)
else:
raise RuntimeError(f"{control_path} must be a directory")
password_file = Path(password_file).expanduser()
if not password_file.exists():
try:
old_umask = os.umask(0o077)
with password_file.open("wb") as f:
password = <PASSWORD>(16)
f.write(password)
password_file.chmod(0o0400)
try:
os.umask(old_umask)
except OSError:
pass
except OSError as ose:
raise RuntimeError(
f"Password file {password_file} does not exist and could not be created: {ose}."
)
# Derived constants.
ssh_connection_sharing = [
"-o",
'ControlPersist="5m"',
"-o",
'ControlMaster="auto"',
"-o",
f'ControlPath="{control_path}/master-%C"',
]
ssh_indirect_command = ["gsissh", MACHINE_TABLE[target]["gsissh_name"]]
##
## While we're requiring that jobs are submitted before creating the
## annex (for .sif pre-staging purposes), refuse to make the annex
## if no such jobs exist.
##
schedd = htcondor.Schedd()
annex_jobs = schedd.query(f'TargetAnnexName == "{annex_name}"')
if len(annex_jobs) == 0:
raise RuntimeError(
f"No jobs for '{annex_name}' are in the queue. Use 'htcondor job submit --annex-name' to add them first."
)
logger.debug(
f"""Found {len(annex_jobs)} annex jobs matching 'TargetAnnexName == "{annex_name}"."""
)
# Extract the .sif file from each job.
sif_files = set()
for job_ad in annex_jobs:
sif_file = extract_sif_file(job_ad)
if sif_file is not None:
sif_file = Path(sif_file)
if sif_file.exists():
sif_files.add(sif_file)
else:
raise RuntimeError(
f"""Job {job_ad["ClusterID"]}.{job_ad["ProcID"]} specified container image '{sif_file}', which doesn't exist."""
)
if sif_files:
logger.debug(f"Got sif files: {sif_files}")
else:
logger.debug("No sif files found, continuing...")
# The .sif files will be transferred to the target machine later.
# Distinguish our text from SSH's text.
ANSI_BRIGHT = "\033[1m"
ANSI_RESET_ALL = "\033[0m"
##
## The user will do the 2FA/SSO dance here.
##
logger.info(
f"{ANSI_BRIGHT}This command will prompt you with a "
f"{MACHINE_TABLE[target]['pretty_name']} log-in. To proceed, "
f"log-in to {MACHINE_TABLE[target]['pretty_name']} at the prompt "
f"below; to cancel, hit CTRL-C.{ANSI_RESET_ALL}"
)
logger.debug(
f" (You can run 'ssh {' '.join(ssh_connection_sharing)} {ssh_target}' to use the shared connection.)"
)
rc = make_initial_ssh_connection(
ssh_connection_sharing,
ssh_target,
ssh_indirect_command,
)
if rc != 0:
raise RuntimeError(
f"Failed to make initial connection to {MACHINE_TABLE[target]['pretty_name']}, aborting ({rc})."
)
logger.info(f"{ANSI_BRIGHT}Thank you.{ANSI_RESET_ALL}\n")
##
## Register the clean-up function before creating the mess to clean-up.
##
remote_script_dir = None
# Allow atexit functions to run on SIGTERM.
signal.signal(signal.SIGTERM, lambda signal, frame: sys.exit(128 + 15))
# Hide the traceback on CTRL-C.
signal.signal(signal.SIGINT, lambda signal, frame: sys.exit(128 + 2))
# Remove the temporary directories on exit.
atexit.register(
lambda: remove_remote_temporary_directory(
logger,
ssh_connection_sharing,
ssh_target,
ssh_indirect_command,
remote_script_dir,
)
)
logger.debug("Making remote temporary directory...")
remote_script_dir = Path(
make_remote_temporary_directory(
logger,
ssh_connection_sharing,
ssh_target,
ssh_indirect_command,
)
)
logger.debug(f"... made remote temporary directory {remote_script_dir} ...")
logger.info(f"Populating annex temporary directory...")
populate_remote_temporary_directory(
logger,
ssh_connection_sharing,
ssh_target,
ssh_indirect_command,
target,
local_script_dir,
remote_script_dir,
token_file,
password_file,
)
if sif_files:
logger.debug("... transferring container images ...")
transfer_sif_files(
logger,
ssh_connection_sharing,
ssh_target,
ssh_indirect_command,
remote_script_dir,
sif_files,
)
logger.info("... populated.")
# Submit local universe job.
logger.debug("Submitting state-tracking job...")
local_job_executable = local_script_dir / "annex-local-universe.py"
if not local_job_executable.exists():
raise RuntimeError(
f"Could not find local universe executable, expected {local_job_executable}"
)
#
# The magic in this job description is thus:
# * hpc_annex_start_time is undefined until the job runs and finds
# a machine ad with a matching hpc_annex_request_id.
# * The job will go idle (because it can't start) at that poing,
# based on its Requirements.
# * Before then, the job's on_exit_remove must be false -- not
# undefined -- to make sure it keeps polling.
# * The job runs every five minutes because of cron_minute.
#
submit_description = htcondor.Submit(
{
"universe": "local",
# hpc_annex_start time is set by the job script when it finds
# a machine with a matching request ID. At that point, we can
# stop running this script, but we don't remove it to simplify
# the UI/UX code; instead, we wait until an hour past the end
# of the request's lifetime to trigger a periodic remove.
"requirements": "hpc_annex_start_time =?= undefined",
"executable": str(local_job_executable),
# Sadly, even if you set on_exit_remove to ! requirements,
# the job lingers in X state for a good long time.
"cron_minute": "*/5",
"on_exit_remove": "PeriodicRemove =?= true",
"periodic_remove": f"hpc_annex_start_time + {lifetime} + 3600 < time()",
# Consider adding a log, an output, and an error file to assist
# in debugging later. Problem: where should it go? How does it
# cleaned up?
"environment": f'PYTHONPATH={os.environ.get("PYTHONPATH", "")}',
"+arguments": f'strcat( "$(CLUSTER).0 hpc_annex_request_id ", GlobalJobID, " {collector}")',
"jobbatchname": f'{annex_name} [HPC Annex]',
"+hpc_annex_request_id": 'GlobalJobID',
# Properties of the annex request. We should think about
# representing these as a nested ClassAd. Ideally, the back-end
# would, instead of being passed a billion command-line arguments,
# just pull this ad from the collector (after this local universe
# job has forwarded it there).
"+hpc_annex_name": f'"{annex_name}"',
"+hpc_annex_queue_name": f'"{queue_name}"',
"+hpc_annex_collector": f'"{collector}"',
"+hpc_annex_lifetime": f'"{lifetime}"',
"+hpc_annex_owners": f'"{owners}"',
# FIXME: `nodes` should be undefined if not set on the
# command line but either cpus or mem_mb are.
"+hpc_annex_nodes": f'"{nodes}"'
if nodes is not None else "undefined",
"+hpc_annex_cpus": f'"{cpus}"'
if cpus is not None else "undefined",
"+hpc_annex_mem_mb": f'"{mem_mb}"'
if mem_mb is not None else "undefined",
"+hpc_annex_allocation": f'"{allocation}"'
if allocation is not None else "undefined",
# Hard state required for clean up. We'll be adding
# hpc_annex_PID, hpc_annex_PILOT_DIR, and hpc_annex_JOB_ID
# as they're reported by the back-end script.
"+hpc_annex_remote_script_dir": f'"{remote_script_dir}"',
}
)
try:
logger.debug(f"")
logger.debug(textwrap.indent(str(submit_description), " "))
submit_result = schedd.submit(submit_description)
except Exception:
raise RuntimeError(f"Failed to submit state-tracking job, aborting.")
cluster_id = submit_result.cluster()
logger.debug(f"... done.")
logger.debug(f"with cluster ID {cluster_id}.")
results = schedd.query(
f'ClusterID == {cluster_id} && ProcID == 0',
opts=htcondor.QueryOpts.DefaultMyJobsOnly,
projection=["GlobalJobID"],
)
request_id = results[0]["GlobalJobID"]
##
## We changed the job(s) at submit time to prevent them from running
## anywhere other than the annex, so it's OK to change them again to
## make it impossible to run them anywhere else before the annex job
## is successfully submitted. Doing so after allows for a race
## condition, so let's not, since we don't have to.
##
## Change the jobs so that they don't transfer the .sif files we just
## pre-staged.
##
## The startd can't rewrite the job ad the shadow uses to decide
## if it should transfer the .sif file, but it can change ContainerImage
## to point the pre-staged image, if it's just the basename. (Otherwise,
## it gets impossibly difficult to make the | |
from enum import Enum, auto
import itertools
from .figure import Figure, Circle
from .scene import Scene
from .util import Comment, divide, normalize_number, keys_for_triangle
class Property:
    """Base class for geometric statements ("properties").

    Properties form a derivation graph: each property may have a reason
    (the derivation that produced it) and records in ``implications``
    the properties that were in turn derived from it.
    """

    def __init__(self, property_key, point_set):
        # Properties derived from this one; maintained by the `reason`
        # setter of the dependent properties.
        self.implications = []
        # Identity key for equality/hashing; subclasses pass a value
        # that uniquely identifies the statement.
        self.property_key = property_key
        # The set of points the statement involves.
        self.point_set = point_set
        # Lazily computed hash cache (see __hash__).
        self.__hash = None
        # The current derivation; managed via the `reason` property.
        self.__reason = None

    @property
    def reason(self):
        """The derivation this property was obtained from (may be None)."""
        return self.__reason

    @reason.setter
    def reason(self, value):
        # Detach this property from the implication lists of the old
        # reason's premises before attaching to the new reason.
        if self.__reason:
            for pre in self.__reason.premises:
                pre.implications = [p for p in pre.implications if p is not self]
        # Avoid circular derivations: while this property occurs among
        # the candidate reason's (transitive) premises, substitute that
        # premise's own reason instead.
        while self in value.all_premises:
            # TODO: select the best variant
            for prop in value.all_premises:
                if prop == self:
                    value = prop.reason
        self.__reason = value
        for pre in self.__reason.premises:
            pre.implications.append(self)
        self.fire_premises_change()

    @property
    def priority(self):
        # Combined priority: the per-class base priority scaled by the
        # producing rule's priority when one has been attached
        # (self.rule is set externally - not visible in this class).
        if not hasattr(self, 'rule'):
            return self.__priority__ * 2
        else:
            return self.__priority__ * self.rule.priority()

    @property
    def __priority__(self):
        # Per-class base priority; subclasses override this.
        return 3

    def fire_premises_change(self):
        # Invalidate the reason's cached premises and propagate the
        # change to everything derived from this property.
        self.reason.reset_premises()
        for impl in self.implications:
            impl.fire_premises_change()

    def keys(self):
        """Lookup keys for indexing this property; subclasses override."""
        return []

    def stringify(self, printer):
        return self.description.stringify(printer)

    def compare_values(self, other):
        # Subclasses carrying a value component (e.g. a boolean flag)
        # override this; the base property has no value to compare.
        return True

    def __str__(self):
        return str(self.description)

    def __eq__(self, other):
        # Equality requires the exact same type plus equal property_key.
        return type(self) == type(other) and self.property_key == other.property_key

    def __hash__(self):
        if self.__hash is None:
            self.__hash = hash(type(self)) + hash(self.property_key)
        return self.__hash
class PointAndCircleProperty(Property):
    """The location of a point relative to a circle (inside/on/outside)."""

    class Kind(Enum):
        inside = auto()
        on = auto()
        outside = auto()

        def __str__(self):
            return self.name

    @staticmethod
    def unique_key(point, cpoints_set):
        """Identity key: the point together with the circle's point set."""
        return (point, cpoints_set)

    def __init__(self, point, cpoint0, cpoint1, cpoint2, location):
        self.point = point
        self.circle_key = frozenset((cpoint0, cpoint1, cpoint2))
        self.location = location
        key = PointAndCircleProperty.unique_key(self.point, self.circle_key)
        super().__init__(key, {point, cpoint0, cpoint1, cpoint2})

    def keys(self):
        return self.property_key

    @property
    def description(self):
        kind = PointAndCircleProperty.Kind
        patterns = {
            kind.inside: '$%{point:pt}$ lies inside $%{circle:circ}$',
            kind.outside: '$%{point:pt}$ lies outside of $%{circle:circ}$',
            kind.on: '$%{point:pt}$ lies on $%{circle:circ}$',
        }
        params = {'pt': self.point, 'circ': Circle(*self.circle_key)}
        return Comment(patterns[self.location], params)

    def compare_values(self, other):
        return self.location == other.location
class CircleCoincidenceProperty(Property):
    """Two circles (each defined by a triple of points) are [not] the
    same circle."""

    def __init__(self, triple0, triple1, coincident):
        self.circle_keys = (frozenset(triple0), frozenset(triple1))
        self.coincident = coincident
        super().__init__(frozenset(self.circle_keys), {*triple0, *triple1})

    @property
    def __priority__(self):
        return 1

    @property
    def description(self):
        pattern = (
            '$%{circle:c0}$ coincides with $%{circle:c1}$'
            if self.coincident
            else '$%{circle:c0}$ and $%{circle:c1}$ differ'
        )
        first, second = self.circle_keys
        return Comment(pattern, {'c0': Circle(*first), 'c1': Circle(*second)})

    def compare_values(self, other):
        return self.coincident == other.coincident
class ConcyclicPointsProperty(Property):
    """Four points lie on a common circle."""

    def __init__(self, *points):
        assert len(points) == 4
        self.points = points
        super().__init__(frozenset(points), set(points))

    @property
    def __priority__(self):
        return 1

    @property
    def description(self):
        labels = ('pt0', 'pt1', 'pt2', 'pt3')
        return Comment(
            'Points $%{point:pt0}$, $%{point:pt1}$, $%{point:pt2}$, and $%{point:pt3}$ are concyclic',
            dict(zip(labels, self.points))
        )
class PointOnLineProperty(Property):
    """A point does (or does not) lie on a given line."""

    def __init__(self, point, segment, on_line):
        super().__init__((point, segment), {point, *segment.points})
        self.point = point
        self.segment = segment
        self.on_line = on_line

    @property
    def __priority__(self):
        return 1

    @property
    def description(self):
        pattern = (
            '$%{point:point}$ lies on line $%{line:line}$'
            if self.on_line
            else '$%{point:point}$ does not lie on line $%{line:line}$'
        )
        return Comment(pattern, {'point': self.point, 'line': self.segment})

    def compare_values(self, other):
        return self.on_line == other.on_line
class LinesCoincidenceProperty(Property):
    """States that two lines, each given by a segment, are (or are not) the same line."""

    def __init__(self, segment0, segment1, coincident):
        self.segments = (segment0, segment1)
        super().__init__(frozenset(self.segments), {*segment0.points, *segment1.points})
        self.coincident = coincident

    @property
    def __priority__(self):
        return 1

    @property
    def description(self):
        template = (
            '$%{line:line0}$ is the same line as $%{line:line1}$'
            if self.coincident else
            '$%{line:line0}$ and $%{line:line1}$ are different lines'
        )
        first, second = self.segments
        return Comment(template, {'line0': first, 'line1': second})

    def compare_values(self, other):
        return self.coincident == other.coincident
class PointsCollinearityProperty(Property):
    """States that three points are (or are not) collinear."""

    def __init__(self, point0, point1, point2, collinear):
        self.points = (point0, point1, point2)
        super().__init__(frozenset(self.points), set(self.points))
        self.collinear = collinear

    @property
    def __priority__(self):
        return 1

    def keys(self, lengths=None):
        return keys_for_triangle(Scene.Triangle(*self.points), lengths)

    @property
    def description(self):
        template = (
            'Points $%{point:pt0}$, $%{point:pt1}$, and $%{point:pt2}$ are collinear'
            if self.collinear else
            'Points $%{point:pt0}$, $%{point:pt1}$, and $%{point:pt2}$ are not collinear'
        )
        return Comment(template, dict(zip(('pt0', 'pt1', 'pt2'), self.points)))

    def compare_values(self, other):
        return self.collinear == other.collinear
class ParallelVectorsProperty(Property):
    """States that two vectors are parallel (trivially true if either has zero length)."""

    def __init__(self, vector0, vector1):
        self.vectors = (vector0, vector1)
        super().__init__(frozenset(self.vectors), {*vector0.points, *vector1.points})

    def keys(self):
        return [vec.as_segment for vec in self.vectors]

    @property
    def __priority__(self):
        return 1

    @property
    def description(self):
        first, second = self.vectors
        return Comment(
            '$%{vector:vec0} \\uparrow\\!\\!\\!\\uparrow %{vector:vec1}$',
            {'vec0': first, 'vec1': second}
        )
class ParallelSegmentsProperty(Property):
    """States that two segments are parallel (trivially true if either has zero length)."""

    def __init__(self, segment0, segment1):
        self.segments = (segment0, segment1)
        super().__init__(frozenset(self.segments), {*segment0.points, *segment1.points})

    def keys(self):
        return self.segments

    @property
    def __priority__(self):
        return 1

    @property
    def description(self):
        first, second = self.segments
        return Comment(
            '$%{segment:seg0} \\,\\|\\, %{segment:seg1}$',
            {'seg0': first, 'seg1': second}
        )
class PerpendicularSegmentsProperty(Property):
    """States that two segments are perpendicular (trivially true if either has zero length)."""

    def __init__(self, segment0, segment1):
        self.segments = (segment0, segment1)
        super().__init__(frozenset(self.segments), {*segment0.points, *segment1.points})

    def keys(self):
        return self.segments

    @property
    def __priority__(self):
        return 1

    @property
    def description(self):
        first, second = self.segments
        return Comment(
            '$%{segment:seg0} \\perp %{segment:seg1}$',
            {'seg0': first, 'seg1': second}
        )
class PointsCoincidenceProperty(Property):
    """States that two distinct point objects do (or do not) denote the same location."""

    def __init__(self, point0, point1, coincident):
        assert isinstance(point0, Scene.Point)
        assert isinstance(point1, Scene.Point)
        assert point0 != point1
        self.points = [point0, point1]
        super().__init__(frozenset(self.points), {point0, point1})
        self.coincident = coincident

    @property
    def __priority__(self):
        # Coincidence ranks higher than non-coincidence.
        return 3 if self.coincident else 1

    def keys(self):
        first, second = self.points
        return [first.segment(second), first, second]

    @property
    def description(self):
        template = (
            'Points $%{point:pt0}$ and $%{point:pt1}$ are coincident'
            if self.coincident else
            'Points $%{point:pt0}$ and $%{point:pt1}$ are not coincident'
        )
        return Comment(template, {'pt0': self.points[0], 'pt1': self.points[1]})

    def compare_values(self, other):
        return self.coincident == other.coincident
class SameOrOppositeSideProperty(Property):
    """States that two points lie on the same side (or on opposite sides) of a line."""

    @staticmethod
    def unique_key(segment, point0, point1):
        return frozenset([segment, point0, point1])

    def __init__(self, segment, point0, point1, same):
        self.segment = segment
        self.points = (point0, point1)
        self.same = same
        key = SameOrOppositeSideProperty.unique_key(segment, point0, point1)
        super().__init__(key, {point0, point1, *segment.points})

    @property
    def __priority__(self):
        return 1

    def keys(self):
        return [self.segment]

    @property
    def description(self):
        template = (
            '$%{point:pt0}$, $%{point:pt1}$ located on the same side of line $%{line:line}$'
            if self.same else
            '$%{point:pt0}$, $%{point:pt1}$ located on opposite sides of line $%{line:line}$'
        )
        return Comment(template, {'pt0': self.points[0], 'pt1': self.points[1], 'line': self.segment})

    def compare_values(self, other):
        return self.same == other.same
class PointInsideAngleProperty(Property):
    """States that a point lies inside an angle."""

    def __init__(self, point, angle):
        self.point = point
        self.angle = angle
        super().__init__((point, angle), {point, *angle.point_set})

    @property
    def __priority__(self):
        return 1

    def keys(self):
        return [self.point, self.angle]

    @property
    def description(self):
        return Comment(
            '$%{point:pt}$ lies inside $%{angle:angle}$',
            {'pt': self.point, 'angle': self.angle}
        )
class EquilateralTriangleProperty(Property):
    """States that a triangle is equilateral."""

    def __init__(self, points):
        # Accept either a ready-made Triangle or a triple of points.
        if isinstance(points, Scene.Triangle):
            self.triangle = points
        else:
            self.triangle = Scene.Triangle(*points)
        super().__init__(frozenset(self.triangle.points), set(self.triangle.points))

    def keys(self, lengths=None):
        return keys_for_triangle(self.triangle, lengths)

    @property
    def __priority__(self):
        return 4.5

    @property
    def description(self):
        return Comment('$%{triangle:triangle}$ is equilateral', {'triangle': self.triangle})
class SquareProperty(Property):
    """States that four points (in cyclic order) form a square."""

    @staticmethod
    def unique_key(four_points):
        # All four cyclic rotations of the vertex order, in both
        # orientations, denote the same square.
        def rotations(four):
            return [four[i:] + four[:i] for i in range(4)]
        return frozenset(rotations(four_points) + rotations(tuple(reversed(four_points))))

    def __init__(self, square):
        assert len(square.points) == 4
        self.square = square
        super().__init__(SquareProperty.unique_key(square.points), set(square.points))

    @property
    def __priority__(self):
        return 4

    @property
    def description(self):
        return Comment('$%{polygon:square}$ is a square', {'square': self.square})
class NondegenerateSquareProperty(Property):
    """States that four points form a non-degenerate square."""

    def __init__(self, square):
        assert len(square.points) == 4
        self.square = square
        # Shares SquareProperty's rotation/reflection-invariant key scheme.
        super().__init__(SquareProperty.unique_key(square.points), set(square.points))

    @property
    def __priority__(self):
        return 4.5

    @property
    def description(self):
        return Comment('$%{polygon:square}$ is a non-degenerate square', {'square': self.square})
class CentreOfEquilateralTriangleProperty(Property):
    """States that a point is the centre of an equilateral triangle."""

    def __init__(self, centre, triangle):
        self.centre = centre
        self.triangle = triangle
        super().__init__(
            (centre, frozenset(triangle.points)),
            {centre, *triangle.points}
        )

    @property
    def __priority__(self):
        return 4.5

    @property
    def description(self):
        return Comment(
            '$%{point:centre}$ is the centre of equilateral $%{triangle:triangle}$',
            {'centre': self.centre, 'triangle': self.triangle}
        )
class AngleKindProperty(Property):
    """Classifies an angle as acute, right, or obtuse."""

    class Kind(Enum):
        acute = auto()
        right = auto()
        obtuse = auto()

        def __str__(self):
            return self.name

    def __init__(self, angle, kind):
        self.angle = angle
        self.kind = kind
        super().__init__(angle, angle.point_set)

    def keys(self):
        return [self.angle]

    @property
    def __priority__(self):
        return 1

    @property
    def description(self):
        if self.kind == AngleKindProperty.Kind.acute:
            template = '$%{angle:angle}$ is acute'
        elif self.kind == AngleKindProperty.Kind.obtuse:
            template = '$%{angle:angle}$ is obtuse'
        else:
            template = '$%{angle:angle}$ is right'
        return Comment(template, {'angle': self.angle})

    def compare_values(self, other):
        return self.kind == other.kind
class AngleValueProperty(Property):
"""
Angle value
"""
@staticmethod
def generate(vector0, vector1, value):
def rev(first, second):
vec0 = vector0.reversed if first else vector0
vec1 = vector1.reversed if second else vector1
return vec0.angle(vec1)
if vector0.start == vector1.start:
angles = [(rev(False, False), False)]
elif vector0.start == vector1.end:
angles = [(rev(False, True), True)]
elif vector0.end == vector1.start:
angles = [(rev(True, False), True)]
elif vector0.end == vector1.end:
angles = [(rev(True, True), False)]
else:
angles = [
(rev(False, False), False),
(rev(False, True), | |
# micropsi_core/nodenet/native_modules.py (from joschabach/micropsi2)
"""
Builtin native modules
Currently contains
* GradientDescent for 3 layers (input, hidden, outpu)
* GradientDescent for LSTMS
"""
import os
nodetypes = {}
try:
import numpy as np
import theano
numpy_installed = True
except ImportError:
numpy_installed = False
if numpy_installed:
# only register these native modules if we
# have theano and numpy installed.
nodetypes["GradientDescent"] = {
"name": "GradientDescent",
"engine": "theano_engine",
"slottypes": ["gen"],
"gatetypes": ["gen"],
"nodefunction_name": "gradient_descent",
"symbol": "☲",
"category": "nn_learning",
"path": os.path.abspath(__file__),
"parameters": [
"ae_type",
"adadelta_rho",
"adadelta_eps",
"check_grad",
"weight_decay",
"tied_weights",
"sparsity_value",
"sparsity_penalty",
"t",
"ctr",
"input_prefix",
"hidden_prefix",
"output_prefix",
"input_nodespace"
],
"parameter_values": {
"ae_type": ["sparse", "denoising"],
"tied_weights": ["True", "False"],
"check_grad": ["yes", "no"]
},
"parameter_defaults": {
"ae_type": "denoising",
"tied_weights": "True",
"hidden_prefix": "hidden_1",
"output_prefix": "output_1"
}
}
def gradient_descent(netapi, node=None, **params):
"""
Online gradient descent with backpropagation for three layers (input, hidden,
and output layer) and AdaDelta for adapting the learning rate per parameter.
References:
[1] Werbos, PJ. "Beyond Regression: New Tools for Prediction and Analysis
in the Behavioral Sciences." (1974).
[2] Zeiler, MD. "ADADELTA: An adaptive learning rate method." (2012).
        [3] Vincent, P., et al. "Extracting and Composing Robust Features with Denoising
Autoencoders." (2008).
"""
# To be able to switch this native module on and off, require positive
# activation on the gen slot for its code to be run.
if node.get_slot('gen').activation > 0:
import theano
import theano.tensor as T
# get shared name prefix of nodes in input, hidden, and output layers
input_ = node.get_parameter('input_prefix')
hidden = node.get_parameter('hidden_prefix')
output = node.get_parameter('output_prefix')
# get the name of the nodespace where the input lives
ns_input_name = node.get_parameter('input_nodespace')
# get nodespace uids of nodes in input, hidden, and output layers
# assumption: if the input layer consists of sensor nodes, they have their
# own nodespace, all other nodes are in this node's nodespace
ns_input_uid = None
for ns in netapi.get_nodespaces():
if ns.name == ns_input_name:
ns_input_uid = ns.uid
break
ns_hidden_uid = node.parent_nodespace
ns_output_uid = node.parent_nodespace
# initialization
if not hasattr(node, 'initialized'):
node.set_state('cumulative_error', 0)
sparse = str(node.get_parameter('ae_type')) == "sparse"
# denoising = str(node.get_parameter('ae_type')) == "denoising"
tied_weights = str(node.get_parameter('tied_weights')) == "True"
# group nodes
netapi.group_nodes_by_names(ns_input_uid, node_name_prefix=input_)
netapi.group_nodes_by_names(ns_hidden_uid, node_name_prefix=hidden)
netapi.group_nodes_by_names(ns_output_uid, node_name_prefix=output)
# get activation values
a_i_array = netapi.get_activations(ns_input_uid, input_)
a_h_array = netapi.get_activations(ns_hidden_uid, hidden)
a_o_array = netapi.get_activations(ns_output_uid, output)
node.set_parameter('error', 0.0) # store error values to observe how training develops
len_input = len(a_i_array)
len_hidden = len(a_h_array)
len_output = len(a_o_array)
if len_input == 0:
netapi.logger.warn("Node net has no input nodes whose names start with '%s'", input_)
node.set_parameter('ctr', 0)
return
elif len_hidden == 0:
netapi.logger.warn("Node net has no hidden nodes whose names start with '%s'.", hidden)
node.set_parameter('ctr', 0)
return
elif len_output == 0:
netapi.logger.warn("Node net has no output names whose names start with '%s'.", output)
node.set_parameter('ctr', 0)
return
else:
netapi.logger.info("Initializing theano-based autoencoder backprop with layout: %i -> %i -> %i",
len_input, len_hidden, len_output)
# get parameter values from node net
b_h_array = netapi.get_thetas(ns_hidden_uid, hidden)
b_o_array = netapi.get_thetas(ns_output_uid, output)
w_hi_array = netapi.get_link_weights(ns_input_uid, input_, ns_hidden_uid, hidden)
w_oh_array = netapi.get_link_weights(ns_hidden_uid, hidden, ns_output_uid, output)
# declare shared variables ( shared b/w theano and node nets )
a_i = node.a_i = theano.shared(value=a_i_array.astype(T.config.floatX), name="a_i", borrow=False)
a_h = node.a_h = theano.shared(value=a_h_array.astype(T.config.floatX), name="a_h", borrow=False)
a_o = node.a_o = theano.shared(value=a_o_array.astype(T.config.floatX), name="a_o", borrow=False)
b_h = node.b_h = theano.shared(value=b_h_array.astype(T.config.floatX), name="b_h", borrow=False)
b_o = node.b_o = theano.shared(value=b_o_array.astype(T.config.floatX), name="b_o", borrow=False)
w_hi = node.w_hi = theano.shared(value=w_hi_array.astype(T.config.floatX), name="w_hi", borrow=False)
w_oh = node.w_oh = theano.shared(value=w_oh_array.astype(T.config.floatX), name="w_oh", borrow=False)
# write initial parameter values to shared variables
node.b_h.set_value(b_h_array, borrow=True)
node.b_o.set_value(b_o_array, borrow=True)
node.w_hi.set_value(w_hi_array, borrow=True)
node.w_oh.set_value(w_oh_array, borrow=True)
# initialize accumulation variables for AdaDelta, ie. mean square gradients and mean square deltas
ms_grad_b_o = node.ms_grad_b_o = theano.shared(value=np.zeros_like(b_o_array), name="ms_grad_b_o", borrow=True)
ms_grad_w_oh = node.ms_grad_w_oh = theano.shared(value=np.zeros_like(w_oh_array), name="ms_grad_w_oh", borrow=True)
ms_grad_b_h = node.ms_grad_b_h = theano.shared(value=np.zeros_like(b_h_array), name="ms_grad_b_h", borrow=True)
ms_grad_w_hi = node.ms_grad_w_hi = theano.shared(value=np.zeros_like(w_hi_array), name="ms_grad_w_hi", borrow=True)
ms_delta_b_o = node.ms_delta_b_o = theano.shared(value=np.zeros_like(b_o_array), name="ms_delta_b_o", borrow=True)
ms_delta_w_oh = node.ms_delta_w_oh = theano.shared(value=np.zeros_like(w_oh_array), name="ms_delta_w_oh", borrow=True)
ms_delta_b_h = node.ms_delta_b_h = theano.shared(value=np.zeros_like(b_h_array), name="ms_delta_b_h", borrow=True)
ms_delta_w_hi = node.ms_delta_w_hi = theano.shared(value=np.zeros_like(w_hi_array), name="ms_delta_w_hi", borrow=True)
# make function parameters theano compatible
weight_decay = T.scalar("weight_decay", dtype=T.config.floatX)
sparsity_value = T.scalar("sparsity_value", dtype=T.config.floatX)
sparsity_penalty = T.scalar("sparsity_penalty", dtype=T.config.floatX)
ada_rho = T.scalar("ada_rho", dtype=T.config.floatX)
ada_eps = T.scalar("ada_eps", dtype=T.config.floatX)
# declare the reconstruction error
error_term = T.sum(T.square(a_o - a_i)) / 2. # squared error
# error_term = -T.sum(a_i * T.log(a_o) + (1. - a_i) * T.log(1. - a_o)) # cross-entropy
# use a weight constraint as a regularizer
weight_constraint = (weight_decay / 2.) * (T.sum(T.square(w_hi)) + T.sum(T.square(w_oh)))
if sparse: # training criterion for a sparse autoencoder
# save the average activation of hidden units; initialize to first activation received
avg_a_h = node.avg_a_h = theano.shared(value=a_h_array, name="avg_a_h", borrow=False)
new_avg_a_h = 0.95 * avg_a_h + (1 - 0.95) * a_h # for gradient checking, set new_avg_a_h = a_h
rho = sparsity_value
information_gain = rho * T.log(rho / new_avg_a_h) + (1. - rho) * T.log((1. - rho) / (1. - new_avg_a_h))
sparsity_constraint = sparsity_penalty * T.sum(information_gain)
cost = error_term + weight_constraint + sparsity_constraint
else: # training criterion for a denoising autoencoder
cost = error_term + weight_constraint
node.cost = theano.function([weight_decay, sparsity_value, sparsity_penalty], cost, on_unused_input='ignore')
node.error = theano.function([], error_term / len(b_h_array))
# compute gradients
sigmoid_deriv_a_o = a_o * (1. - a_o)
grad_o = (a_o - a_i) * sigmoid_deriv_a_o # squared error # T.grad(cost, z_o)
# grad_o = ((a_i - a_o) / (a_o - a_o**2)) * sigmoid_deriv_a_o # cross-entropy
sigmoid_deriv_a_h = a_h * (1. - a_h)
if sparse:
grad_w_oh = T.dot(T.reshape(grad_o, (len_input, 1)), T.reshape(a_h, (1, len_hidden))) + weight_decay * w_oh
grad_sparsity = (- rho / new_avg_a_h + (1. - rho) / (1. - new_avg_a_h)).T
grad_h = (T.dot(w_oh.T, grad_o) + sparsity_penalty * grad_sparsity) * sigmoid_deriv_a_h
grad_w_hi = T.dot(T.reshape(grad_h, (len_hidden, 1)), T.reshape(a_i, (1, len_input))) + weight_decay * w_hi
else: # denoising
grad_w_oh = T.dot(T.reshape(grad_o, (len_input, 1)), T.reshape(a_h, (1, len_hidden))) + weight_decay * w_oh
grad_h = T.dot(w_oh.T, grad_o) * sigmoid_deriv_a_h
grad_w_hi = T.dot(T.reshape(grad_h, (len_hidden, 1)), T.reshape(a_i, (1, len_input))) + weight_decay * w_hi
if tied_weights:
grad_w_oh = grad_w_oh + grad_w_hi.T
gradients = [grad_o, grad_w_oh, grad_h]
ms_grad = [ms_grad_b_o, ms_grad_w_oh, ms_grad_b_h]
ms_delta = [ms_delta_b_o, ms_delta_w_oh, ms_delta_b_h]
else:
gradients = [grad_o, grad_w_oh, grad_h, grad_w_hi]
ms_grad = [ms_grad_b_o, ms_grad_w_oh, ms_grad_b_h, ms_grad_w_hi]
ms_delta = [ms_delta_b_o, ms_delta_w_oh, ms_delta_b_h, ms_delta_w_hi]
# update accumulation variables for AdaDelta and compute new deltas
# compute an exponentially decaying average of squared gradients
# ie. recent gradients are more important and the quantity doesn't continue to grow
# thereby allowing the learning rate to grow or shrink as time progresses ( rather than just shrink as in AdaGrad )
new_ms_grad = [ada_rho * ms_g + (1 - ada_rho) * (g**2) for ms_g, g in zip(ms_grad, gradients)]
# Note: the square root of the mean squared gradients plus epsilon is effectively the RMS of the gradients
# epsilon is added ~"to start off the first iteration and to ensure progress when previous updates become small"
deltas = [(T.sqrt(ms_d + ada_eps) / T.sqrt(ms_g + ada_eps)) * g for ms_d, ms_g, g in zip(ms_delta, new_ms_grad, gradients)]
# compute an exponentially decaying average of squared deltas -- this is to ensure correct units
new_ms_delta = [ada_rho * ms_d + (1 - ada_rho) * (d**2) for ms_d, d in zip(ms_delta, deltas)]
# update parameters, ie. old_value - learning_rate * delta_value
if tied_weights:
new_b_o, new_w_oh, new_b_h = (old - update for old, update in zip([b_o, w_oh, b_h], deltas))
new_w_hi = new_w_oh.T
new_ms_grad.append(new_ms_grad[1].T)
new_ms_delta.append(new_ms_delta[1].T)
gradients.append(gradients[1].T)
else:
new_b_o, new_w_oh, new_b_h, new_w_hi = (old - update for old, update in zip([b_o, w_oh, b_h, w_hi], deltas))
if sparse:
update_function = theano.function([weight_decay, sparsity_value, sparsity_penalty, ada_rho, ada_eps],
None,
updates=[(b_o, new_b_o),
(w_oh, new_w_oh),
(b_h, new_b_h),
(w_hi, new_w_hi),
(avg_a_h, new_avg_a_h),
(ms_grad_b_o, new_ms_grad[0]),
(ms_grad_w_oh, new_ms_grad[1]),
(ms_grad_b_h, new_ms_grad[2]),
(ms_grad_w_hi, new_ms_grad[3]),
(ms_delta_b_o, new_ms_delta[0]),
(ms_delta_w_oh, new_ms_delta[1]),
(ms_delta_b_h, new_ms_delta[2]),
(ms_delta_w_hi, new_ms_delta[3])],
on_unused_input='ignore')
else: # denoising
update_function = theano.function([weight_decay, sparsity_value, sparsity_penalty, ada_rho, ada_eps],
None,
updates=[(b_o, new_b_o),
(w_oh, new_w_oh),
(b_h, new_b_h),
(w_hi, new_w_hi),
(ms_grad_b_o, new_ms_grad[0]),
(ms_grad_w_oh, new_ms_grad[1]),
(ms_grad_b_h, new_ms_grad[2]),
(ms_grad_w_hi, new_ms_grad[3]),
(ms_delta_b_o, new_ms_delta[0]),
(ms_delta_w_oh, new_ms_delta[1]),
(ms_delta_b_h, new_ms_delta[2]),
(ms_delta_w_hi, new_ms_delta[3])],
on_unused_input='ignore')
node.get_updated_parameters = update_function
# for gradient checking use the following function:
node.get_gradients = theano.function([weight_decay, sparsity_value, sparsity_penalty, ada_rho, ada_eps],
[gradients[0], gradients[1], gradients[2], gradients[3]], on_unused_input='ignore')
node.initialized = True
# get input | |
# src/panfrost/bifrost/valhall/valhall.py
#encoding=utf-8
# Copyright (C) 2016 Intel Corporation
# Copyright (C) 2016 Broadcom
# Copyright (C) 2020 Collabora, Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
import textwrap
import xml.etree.ElementTree as ET
import sys
# Load the machine-readable ISA description shipped next to this script.
tree = ET.parse(os.path.join(os.path.dirname(__file__), 'ISA.xml'))
root = tree.getroot()
# All instructions in the ISA
instructions = []
# All immediates in the ISA
ilut = root.findall('lut')[0]
assert(ilut.attrib['name'] == "Immediates")
# base=0 lets ISA.xml write constants in decimal, hex (0x...), or octal.
immediates = [int(imm.text, base=0) for imm in ilut.findall('constant')]
# Maps safe_name(enum name) -> Enum; populated by the loop further below.
enums = {}
def xmlbool(s):
    """Parse an XML boolean attribute string ("true"/"false", any case) to a bool.

    Raises AssertionError for any other input so schema typos fail loudly.
    """
    lowered = s.lower()
    assert lowered in ("false", "true")
    # Direct comparison instead of the original `False if ... else True`.
    return lowered == "true"
class EnumValue:
    """One named value of an ISA enum, flagged if it is the hardware default."""

    def __init__(self, value, default):
        self.value = value      # textual value as written in ISA.xml
        self.default = default  # truthy when this value is the enum's default
class Enum:
    """An ISA enum: its ordered values plus (at most one) default value.

    ``default`` is only set when some value is flagged as the default.
    """

    def __init__(self, name, values):
        self.name = name
        self.values = values
        self.bare_values = [v.value for v in values]
        defaults = [v.value for v in values if v.default]
        assert len(defaults) <= 1
        if defaults:
            self.default = defaults[0]
def build_enum(el):
    """Construct an Enum from an <enum> XML element (children: <value>/<reserved>)."""
    values = []
    for child in el:
        if child.tag == 'value':
            # NOTE(review): attrib.get returns the raw string, so default="false"
            # would still be truthy here — presumably ISA.xml only ever writes
            # default="true"; confirm against the schema before relying on it.
            values.append(EnumValue(child.text, child.attrib.get('default', False)))
        elif child.tag == 'reserved':
            values.append(EnumValue("reserved", False))
    return Enum(el.attrib['name'], values)
class Modifier:
    """A modifier field of an instruction word: bit position, width, legal values."""

    def __init__(self, name, start, size, implied = False):
        self.name = name
        self.start = start
        self.size = size
        self.implied = implied
        if size == 1:
            # One-bit modifiers are bare flags: clear ('') or set (the name).
            self.bare_values = ['', name]
            self.default = 0
        else:
            # Wider modifiers take their values from the like-named ISA enum.
            enum = enums[name]
            self.bare_values = [v.value for v in enum.values]
            defaults = [v for v in enum.values if v.default]
            assert len(defaults) <= 1
            self.default = self.bare_values.index(defaults[0].value) if defaults else None
def Flag(name, start):
    """Shorthand for a one-bit Modifier at bit *start*."""
    return Modifier(name, start, size=1)
# Model a single instruction and its operands
class Source:
    """One source operand of an instruction, with bit positions of its modifiers."""

    def __init__(self, index, size, is_float = False, swizzle = False, halfswizzle = False, widen = False, lanes = False, lane = None, absneg = False, notted = False, name = ""):
        # absneg implies a floating-point operand.
        self.is_float = is_float or absneg
        self.size = size
        self.absneg = absneg
        self.notted = notted
        self.swizzle = swizzle
        self.halfswizzle = halfswizzle
        self.widen = widen
        self.lanes = lanes
        self.lane = lane
        self.name = name
        # Bit position / width of each per-source modifier field, keyed by name.
        self.offset = {}
        self.bits = {}
        if absneg:
            slot = (2 - index) * 2
            self.offset['neg'] = 34 + slot
            self.offset['abs'] = 35 + slot
            self.bits['neg'] = 1
            self.bits['abs'] = 1
        if notted:
            self.offset['not'] = 35
            self.bits['not'] = 1
        if widen or lanes or halfswizzle:
            self.offset['widen'] = 26 if index == 1 else 36
            self.bits['widen'] = 4 # XXX: too much?
        if lane:
            self.offset['lane'] = lane
            self.bits['lane'] = 2 if size in (8, 32) else 1
        if swizzle:
            assert size in (16, 32)
            self.offset['swizzle'] = 24 + (2 - index) * 2
            self.bits['swizzle'] = 2
class Dest:
    """A destination operand; carries only its (possibly empty) name."""

    def __init__(self, name = ""):
        self.name = name
class Staging:
    """A staging-register operand (a read and/or written run of registers)."""

    def __init__(self, read = False, write = False, index = 0, count = 0, flags = True, name = ""):
        assert index < 2
        self.name = name
        self.read = read
        self.write = write
        self.count = count
        self.flags = flags
        # Compatibility with Source: staging registers accept none of the
        # per-source modifiers.
        self.absneg = False
        self.swizzle = False
        self.notted = False
        self.widen = False
        self.lanes = False
        self.lane = False
        self.size = 32
        # First staging slot is encoded at bit 40, the second at bit 16.
        self.start = 40 if index == 0 else 16
        if not flags:
            self.encoded_flags = 0
        elif index > 0:
            self.encoded_flags = 0xC0
        else:
            self.encoded_flags = (0x80 if write else 0) | (0x40 if read else 0)
class Immediate:
    """An inline immediate field: name, starting bit, width, and signedness."""

    def __init__(self, name, start, size, signed):
        self.name = name
        self.start = start
        self.size = size
        self.signed = signed
class Instruction:
    """A single Valhall instruction: opcodes, operands, and secondary-opcode
    encoding position/mask within the instruction word."""

    # Fixed: the original signature used mutable default arguments
    # (srcs = [], ...) — a shared-list pitfall. None sentinels are
    # backward-compatible and safe.
    def __init__(self, name, opcode, opcode2, srcs = None, dests = None, immediates = None, modifiers = None, staging = None):
        self.name = name
        self.srcs = srcs if srcs is not None else []
        self.dests = dests if dests is not None else []
        self.opcode = opcode
        self.opcode2 = opcode2 or 0
        self.immediates = immediates if immediates is not None else []
        self.modifiers = modifiers if modifiers is not None else []
        self.staging = staging

        # Secondary opcode sits after the source fields (8 bits each, min 16).
        self.secondary_shift = max(len(self.srcs) * 8, 16)
        self.secondary_mask = 0xF if opcode2 is not None else 0x0

        if "left" in [x.name for x in self.modifiers]:
            self.secondary_mask |= 0x100

        if len(self.srcs) == 3 and (self.srcs[1].widen or self.srcs[1].lanes):
            self.secondary_mask &= ~0xC # conflicts

        if opcode == 0x90:
            # XXX: XMLify this, but disambiguates sign of conversions
            self.secondary_mask |= 0x10

        if name.startswith(("LOAD.i", "STORE.i", "LD_BUFFER.i")):
            self.secondary_shift = 27 # Alias with memory_size
            self.secondary_mask = 0x7

        assert len(self.dests) == 0 or not staging
        assert not opcode2 or (opcode2 & self.secondary_mask) == opcode2

    def __str__(self):
        return self.name
# Build a single source from XML
def build_source(el, i, size):
    """Build a Source for the i-th <src> element; *size* is the typesize fallback."""
    lane = el.get('lane', None)
    if lane == "true":
        # Auto-placed lane select: bit position depends on the operand slot.
        lane = 38 if i == 0 else 36
    elif lane is not None:
        lane = int(lane)
    return Source(
        i,
        int(el.get('size', size)),
        absneg=el.get('absneg', False),
        is_float=el.get('float', False),
        swizzle=el.get('swizzle', False),
        halfswizzle=el.get('halfswizzle', False),
        widen=el.get('widen', False),
        lanes=el.get('lanes', False),
        lane=lane,
        notted=el.get('not', False),
        name=el.text or "",
    )
def build_imm(el):
    """Build an Immediate from an <imm> element (attrs: name, start, size, signed)."""
    # NOTE(review): bool(attrib.get('signed', False)) is True for ANY non-empty
    # string, including "false" — presumably ISA.xml only ever writes
    # signed="true"; confirm against the schema.
    return Immediate(
        el.attrib['name'],
        int(el.attrib['start']),
        int(el.attrib['size']),
        bool(el.attrib.get('signed', False)),
    )
def build_staging(i, el):
    """Build the i-th staging-register descriptor from an <sr> element."""
    return Staging(
        xmlbool(el.attrib.get('read', 'false')),
        xmlbool(el.attrib.get('write', 'false')),
        i,
        int(el.attrib.get('count', '0')),
        xmlbool(el.attrib.get('flags', 'true')),
        el.text or '',
    )
def build_modifier(el):
    """Build an ad-hoc Modifier from a <mod> element."""
    return Modifier(
        el.attrib['name'],
        int(el.attrib['start']),
        int(el.attrib['size']),
        implied=xmlbool(el.get('implied', 'false')),
    )
# Build a single instruction from XML and group based overrides
def build_instr(el, overrides = {}):
    """Build one Instruction from an instruction/group element and register it
    in the global ``instructions`` list.

    *overrides* (name/opcode/opcode2) come from an enclosing group and take
    precedence over the element's own attributes.
    """
    name = overrides.get('name') or el.attrib.get('name')
    opcode = int(overrides.get('opcode') or el.attrib.get('opcode'), base=0)
    raw_opcode2 = overrides.get('opcode2') or el.attrib.get('opcode2')
    opcode2 = int(raw_opcode2, base=0) if raw_opcode2 else None

    # Explicit operands come from <src>/<dest> children; the srcs=/dests=
    # attributes append implicit operands of the instruction's natural size.
    tsize = typesize(name)
    sources = [build_source(src, i, tsize) for i, src in enumerate(el.findall('src'))]
    sources += [Source(i, int(tsize)) for i in range(int(el.attrib.get('srcs', 0)))]
    dests = [Dest(dest.text or '') for dest in el.findall('dest')]
    dests += [Dest()] * int(el.attrib.get('dests', 0))

    staging = [build_staging(i, sr) for i, sr in enumerate(el.findall('sr'))]
    imms = [build_imm(imm) for imm in el.findall('imm')]

    modifiers = []
    for mod in el:
        if mod.tag in MODIFIERS:
            modifiers.append(MODIFIERS[mod.tag])
        elif mod.tag == 'mod':
            modifiers.append(build_modifier(mod))

    instructions.append(Instruction(name, opcode, opcode2, srcs=sources, dests=dests,
                                    immediates=imms, modifiers=modifiers, staging=staging))
# Build all the instructions in a group by duplicating the group itself with
# overrides for each distinct instruction
def build_group(el):
    """Expand a group element: build one Instruction per nested <ins>,
    reusing the group's operand/modifier layout with per-ins overrides."""
    for ins in el.findall('ins'):
        overrides = {
            'name': ins.attrib['name'],
            'opcode': ins.attrib.get('opcode'),
            'opcode2': ins.attrib.get('opcode2'),
        }
        build_instr(el, overrides=overrides)
# Translation table: word separators become '_', other punctuation is dropped.
# One C-level str.translate pass replaces the original chain of 15 .replace()
# calls (same substitutions, same result).
_TO_ALPHANUM_TABLE = str.maketrans(' /-', '___', '[]():.,=>#&*"+\'')

def to_alphanum(name):
    """Sanitize *name* for identifier use: spaces, slashes and dashes become
    underscores; brackets and other punctuation are removed."""
    return name.translate(_TO_ALPHANUM_TABLE)
def safe_name(name):
    """Lower-cased, identifier-safe form of *name*; prefixed with '_' when it
    would otherwise start with a non-letter."""
    cleaned = to_alphanum(name)
    if not cleaned[0].isalpha():
        cleaned = '_' + cleaned
    return cleaned.lower()
def typesize(opcode):
    """Parse the operand size in bits out of an opcode name's numeric suffix.

    Recognizes 128/48 multi-digit suffixes first, then any *8 suffix, then a
    generic two-digit suffix; falls back to 32 when there is no numeric suffix.
    """
    if opcode.endswith('128'):
        return 128
    if opcode.endswith('48'):
        return 48
    if opcode.endswith('8'):
        return 8
    try:
        return int(opcode[-2:])
    except ValueError:
        # Fixed: was a bare `except:` — only a non-numeric suffix should
        # trigger the 32-bit default.
        return 32
# Populate the global `enums` registry up front: Modifier() looks enums up
# by name, so this must run before any multi-bit Modifier is constructed.
for child in root.findall('enum'):
    enums[safe_name(child.attrib['name'])] = build_enum(child)
MODIFIERS = {
"inactive_result": Modifier("inactive_result", 22, 4),
"store_segment": Modifier("store_segment", 24, 2),
"regfmt": Modifier("register_format", 24, 3),
"vecsize": Modifier("vector_size", 28, 2),
"slot": Modifier("slot", 30, 3),
"roundmode": Modifier("round_mode", 30, 2),
"result_type": Modifier("result_type", 30, 2),
"saturate": Flag("saturate", 30),
"not_result": Flag("not_result", 30),
"lane_op": Modifier("lane_operation", 32, 2),
"cmp": Modifier("condition", 32, 3),
"clamp": Modifier("clamp", 32, 2),
"sr_count": | |
# rpython/jit/backend/llvm/llvm_api.py
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.rtyper.lltypesystem.rffi import str2constcharp, constcharp2str
class LLVMAPI:
    def __init__(self, debug=False):
        """Define the LLVM-C type aliases, then bind all API entry points.

        define_types() must run first: initialise_api() uses the aliases.
        """
        self.debug = debug #disable in prod to prevent castings and comparisons of returned values
        self.define_types()
        self.initialise_api()
def define_types(self):
"""
LLVM uses polymorphic types which C can't represent,
so LLVM-C doesn't define them with concrete/primitive types.
As such we have to refer to most of them with void pointers,
but as the LLVM API also manages memory deallocation for us,
this is likely the simplest choice anyway.
"""
self.Void = lltype.Void
self.VoidPtr = rffi.VOIDP
self.VoidPtrPtr = rffi.VOIDPP
self.ModuleRef = self.VoidPtr
self.TypeRef = self.VoidPtr
self.TypeRefPtr = self.VoidPtrPtr
self.ContextRef = self.VoidPtr
self.ValueRef = self.VoidPtr
self.ValueRefPtr = self.VoidPtrPtr
self.GenericValueRef = self.VoidPtr
self.BasicBlockRef = self.VoidPtr
self.BuilderRef = self.VoidPtr
self.TargetDataRef = self.VoidPtr
self.Enum = lltype.Signed
self.Bool = lltype.Signed #LLVMBOOL is typedefed to int32
self.Str = rffi.CONST_CCHARP
self.VerifierFailureAction = self.Enum
self.RealPredicate = self.Enum
self.IntPredicate = self.Enum
self.TargetDataRef = self.VoidPtr
self.JITDylibRef = self.VoidPtr
self.ThreadSafeModuleRef = self.VoidPtr
self.ThreadSafeContextRef = self.VoidPtr
self.LLJITBuilderRef = self.VoidPtr
self.LLJITRef = self.VoidPtr
self.LLJITRefPtr = self.VoidPtrPtr
self.ErrorRef = self.VoidPtr
self.ExecutionSessionRef = self.VoidPtr
self.JITTargetAddress = self.VoidPtr
self.PassManagerRef = self.VoidPtrPtr
self.JITTargetMachineBuilderRef = self.VoidPtr
self.TargetMachineRef = self.VoidPtr
self.TargetRef = self.VoidPtr
self.PassManagerRef = self.VoidPtr
self.MetadataRef = self.VoidPtr
self.ExecutionSessionRef = self.VoidPtr
self.ObjectLayerRef = self.VoidPtr
self.MemoryManagerFactoryFunction = self.VoidPtr
self.ObjectLinkingLayerCreatorFunction = self.VoidPtr
self.JITEnums = lltype.Struct('JITEnums', ('codegenlevel', lltype.Signed), ('reloc', lltype.Signed), ('codemodel', lltype.Signed))
self.CmpEnums = lltype.Struct('CmpEnums', ('inteq', lltype.Signed), ('intne', lltype.Signed), ('intugt', lltype.Signed), ('intuge', lltype.Signed), ('intult', lltype.Signed), ('intule', lltype.Signed), ('intsgt', lltype.Signed), ('intsge', lltype.Signed), ('intslt', lltype.Signed), ('intsle', lltype.Signed), ('realeq', lltype.Signed), ('realne', lltype.Signed), ('realgt', lltype.Signed), ('realge', lltype.Signed), ('reallt', lltype.Signed), ('realle', lltype.Signed),('realord', lltype.Signed))
def initialise_api(self):
header_files = ["Core","Target","Analysis","DataTypes",
"Error","ErrorHandling","ExternC",
"Initialization","Orc","TargetMachine","Types",
"LLJIT","OrcEE"]
llvm_c = ["llvm-c/"+f+".h" for f in header_files]
cflags = ["""-I/usr/lib/llvm/12/include -D_GNU_SOURCE
-D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS
-D__STDC_LIMIT_MACROS"""] #know this should be in the includes arg, but llvm is weird and only works this way
path = "/home/muke/Programming/Project/pypy/rpython/jit/backend/llvm/llvm_wrapper/" #TODO: get real path
path2 = "/home/muke/Programming/Project/pypy/rpython/jit/backend/llvm/" #wrapper libs need to be in the same directory as the python file, don't ask why
info = ExternalCompilationInfo(includes=llvm_c+[path2+"wrapper.h"],
libraries=["LLVM-12","wrapper"],
include_dirs=["/usr/lib/llvm/12/lib64",
"/usr/lib/llvm/12/include",path],
library_dirs=["/usr/lib/llvm/12/lib64",path],
compile_extra=cflags, link_extra=cflags) #TODO: make this platform independant (rather than hardcoding the output of llvm-config for my system)
self.CreateModule = rffi.llexternal("LLVMModuleCreateWithNameInContext",
[self.Str, self.ContextRef], self.ModuleRef,
compilation_info=info)
self.FunctionType = rffi.llexternal("LLVMFunctionType",
[self.TypeRef, self.TypeRefPtr,
lltype.Unsigned, self.Bool],
self.TypeRef, compilation_info=info)
self.AddFunction = rffi.llexternal("LLVMAddFunction",
[self.ModuleRef, self.Str, self.TypeRef],
self.ValueRef, compilation_info=info)
self.AppendBasicBlock = rffi.llexternal("LLVMAppendBasicBlockInContext",
[self.ContextRef, self.ValueRef, self.Str],
self.BasicBlockRef,
compilation_info=info)
self.CreateBuilder = rffi.llexternal("LLVMCreateBuilderInContext",
[self.ContextRef], self.BuilderRef,
compilation_info=info)
self.PositionBuilderAtEnd = rffi.llexternal("LLVMPositionBuilderAtEnd",
[self.BuilderRef,
self.BasicBlockRef], self.Void,
compilation_info=info)
self.BuildAdd = rffi.llexternal("LLVMBuildAdd",
[self.BuilderRef, self.ValueRef,
self.ValueRef, self.Str],
self.ValueRef, compilation_info=info)
self.BuildFAdd = rffi.llexternal("LLVMBuildAdd",
[self.BuilderRef, self.ValueRef,
self.ValueRef, self.Str],
self.ValueRef, compilation_info=info)
self.BuildRet = rffi.llexternal("LLVMBuildRet",
[self.BuilderRef, self.ValueRef],
self.ValueRef, compilation_info=info)
self.GetInsertBlock = rffi.llexternal("LLVMGetInsertBlock",
[self.BuilderRef],
self.BasicBlockRef, compilation_info=info)
self.GetParam = rffi.llexternal("LLVMGetParam",
[self.ValueRef, lltype.Signed],
self.ValueRef, compilation_info=info)
self.VerifyModule = rffi.llexternal("VerifyModule",
[self.ModuleRef],
self.Bool,
compilation_info=info)
self.DisposeMessage = rffi.llexternal("LLVMDisposeMessage",
[self.Str], self.Void,
compilation_info=info)
self.DisposeBuilder = rffi.llexternal("LLVMDisposeBuilder",
[self.BuilderRef], self.Void,
compilation_info=info)
self.DisposeModule = rffi.llexternal("LLVMDisposeModule",
[self.ModuleRef], self.Void,
compilation_info=info)
self.IntType = rffi.llexternal("LLVMIntTypeInContext",
[self.ContextRef, lltype.Unsigned],
self.TypeRef,
compilation_info=info)
self.ConstInt = rffi.llexternal("LLVMConstInt",
[self.TypeRef, lltype.UnsignedLongLong,
self.Bool], self.ValueRef,
compilation_info=info)
self.InitializeCore = rffi.llexternal("LLVMInitializeCore",
[self.Void], self.Bool,
compilation_info=info)
self.BuildPhi = rffi.llexternal("LLVMBuildPhi",
[self.BuilderRef, self.TypeRef, self.Str],
self.ValueRef, compilation_info=info)
self.GetInsertBlock = rffi.llexternal("LLVMGetInsertBlock",
[self.BuilderRef], self.BasicBlockRef,
compilation_info=info)
self.PositionBuilderAtEnd = rffi.llexternal("LLVMPositionBuilderAtEnd",
[self.BuilderRef,
self.BasicBlockRef],
self.Void, compilation_info=info)
self.BuildFCmp = rffi.llexternal("LLVMBuildFCmp",
[self.BuilderRef, self.RealPredicate,
self.ValueRef, self.ValueRef,
self.Str], self.ValueRef,
compilation_info=info)
self.BuildICmp = rffi.llexternal("LLVMBuildICmp",
[self.BuilderRef, lltype.Signed,
self.ValueRef, self.ValueRef,
self.Str], self.ValueRef,
compilation_info=info)
self.CreateBasicBlock = rffi.llexternal("LLVMCreateBasicBlockInContext",
[self.ContextRef, self.Str],
self.BasicBlockRef,
compilation_info=info)
self.GetParent = rffi.llexternal("LLVMGetBasicBlockParent",
[self.BasicBlockRef], self.ValueRef,
compilation_info=info)
self.AddIncoming = rffi.llexternal("AddIncoming",
[self.ValueRef, self.ValueRef,
self.BasicBlockRef],
self.Void, compilation_info=info)
self.BuildBr = rffi.llexternal("LLVMBuildBr",
[self.BuilderRef, self.BasicBlockRef],
self.ValueRef, compilation_info=info)
self.BuildCondBr = rffi.llexternal("LLVMBuildCondBr",
[self.BuilderRef, self.ValueRef,
self.BasicBlockRef, self.BasicBlockRef],
self.ValueRef, compilation_info=info)
self.GetDataLayout = rffi.llexternal("LLVMGetDataLayoutStr",
[self.ModuleRef], self.Str,
compilation_info=info)
self.SetModuleDataLayout = rffi.llexternal("LLVMSetModuleDataLayout",
[self.ModuleRef,
self.TargetDataRef],
self.Void, compilation_info=info)
self.CreateTargetData = rffi.llexternal("LLVMCreateTargetData",
[self.Str], self.TargetDataRef,
compilation_info=info)
self.InitializeNativeTarget = rffi.llexternal("InitializeNativeTarget",
[self.Void], self.Bool,
compilation_info=info)
self.InitializeNativeAsmPrinter = rffi.llexternal("InitializeNativeAsmPrinter",
[self.Void], self.Bool,
compilation_info=info)
self.CreateThreadSafeModule = rffi.llexternal("LLVMOrcCreateNewThreadSafeModule",
[self.ModuleRef,
self.ThreadSafeContextRef],
self.ThreadSafeModuleRef,
compilation_info=info)
self.CreateThreadSafeContext = rffi.llexternal("LLVMOrcCreateNewThreadSafeContext",
[self.Void],
self.ThreadSafeContextRef,
compilation_info=info)
self.GetContext = rffi.llexternal("LLVMOrcThreadSafeContextGetContext",
[self.ThreadSafeContextRef],
self.ContextRef,
compilation_info=info)
self.LLJITLookup = rffi.llexternal("LLJITLookup",
[self.LLJITRef,
self.Str], self.JITTargetAddress,
compilation_info=info)
self.LLJITAddModule = rffi.llexternal("LLVMOrcLLJITAddLLVMIRModule",
[self.LLJITRef,
self.JITDylibRef,
self.ThreadSafeModuleRef],
self.ErrorRef,
compilation_info=info)
self.LLJITGetMainJITDylib = rffi.llexternal("LLVMOrcLLJITGetMainJITDylib",
[self.LLJITRef],
self.JITDylibRef,
compilation_info=info)
self.LLJITGetExecutionSession = rffi.llexternal("LLVMOrcExecutionSessionRef",
[self.LLJITRef],
self.ExecutionSessionRef,
compilation_info=info)
self.CreateLLJIT = rffi.llexternal("CreateLLJIT",
[self.LLJITBuilderRef],
self.LLJITRef,
compilation_info=info)
self.CreateLLJITBuilder = rffi.llexternal("LLVMOrcCreateLLJITBuilder",
[self.Void],
self.LLJITBuilderRef,
compilation_info=info)
self.CreatePassManager = rffi.llexternal("LLVMCreatePassManager",
[self.Void],
self.PassManagerRef,
compilation_info=info)
self.RunPassManager = rffi.llexternal("LLVMRunPassManager",
[self.PassManagerRef,
self.ModuleRef], self.Bool,
compilation_info=info)
self.LLJITBuilderSetJITTargetMachineBuilder = rffi.llexternal("LLVMOrcLLJITBuilderSetJITTargetMachineBuilder",
[self.LLJITBuilderRef,
self.JITTargetMachineBuilderRef],
self.Void,
compilation_info=info)
self.JITTargetMachineBuilderCreateFromTargetMachine = rffi.llexternal("LLVMOrcJITTargetMachineBuilderCreateFromTargetMachine",
[self.TargetMachineRef],
self.JITTargetMachineBuilderRef,
compilation_info=info)
self.GetHostCPUName = rffi.llexternal("LLVMGetHostCPUName",
[self.Void],
self.Str,
compilation_info=info)
self.GetHostCPUFeatures = rffi.llexternal("LLVMGetHostCPUFeatures",
[self.Void],
self.Str,
compilation_info=info)
self.GetHostCPUFeatures = rffi.llexternal("LLVMGetHostCPUFeatures",
[self.Void],
self.Str,
compilation_info=info)
self.CreateTargetMachine = rffi.llexternal("LLVMCreateTargetMachine",
[self.TargetRef,
self.Str, self.Str,
self.Str, self.Enum,
self.Enum, self.Enum],
self.TargetMachineRef,
compilation_info=info)
self.GetTarget = rffi.llexternal("GetTargetFromTriple",
[self.Str],
self.TargetRef,
compilation_info=info)
self.CreateTargetDataLayout = rffi.llexternal("LLVMCreateTargetDataLayout",
[self.TargetMachineRef],
self.TargetDataRef,
compilation_info=info)
self.GetTargetTriple = rffi.llexternal("LLVMGetDefaultTargetTriple",
[self.Void],
self.Str,
compilation_info=info)
self.GetParam = rffi.llexternal("LLVMGetParam",
[self.ValueRef, lltype.Signed],
self.ValueRef,
compilation_info=info)
self.PositionBuilderBefore = rffi.llexternal("LLVMPositionBuilderBefore",
[self.BuilderRef,
self.BasicBlockRef],
self.Void,
compilation_info=info)
self.EraseInstruction = rffi.llexternal("LLVMInstructionEraseFromParent",
[self.ValueRef],
self.Void,
compilation_info=info)
self.GetFirstInstruction = rffi.llexternal("LLVMGetFirstInstruction",
[self.BasicBlockRef],
self.ValueRef,
compilation_info=info)
self.CloneModule = rffi.llexternal("LLVMCloneModule",
[self.ModuleRef],
self.ModuleRef,
compilation_info=info)
self.TypeOf = rffi.llexternal("LLVMTypeOf",
[self.ValueRef],
self.TypeRef,
compilation_info=info)
self.VoidType = rffi.llexternal("LLVMVoidTypeInContext",
[self.ContextRef],
self.TypeRef,
compilation_info=info)
self.StructType = rffi.llexternal("LLVMStructTypeInContext",
[self.ContextRef, self.TypeRefPtr,
lltype.Unsigned, self.Bool],
self.TypeRef,
compilation_info=info)
self.ArrayType = rffi.llexternal("LLVMArrayType",
[self.TypeRef, lltype.Unsigned],
self.TypeRef,
compilation_info=info)
self.PointerType = rffi.llexternal("LLVMPointerType",
[self.TypeRef, lltype.Unsigned],
self.TypeRef,
compilation_info=info)
self.BuildStructGEP = rffi.llexternal("LLVMBuildStructGEP2",
[self.BuilderRef, self.TypeRef,
self.ValueRef, lltype.Unsigned,
self.Str],
self.ValueRef,
compilation_info=info)
self.BuildGEP = rffi.llexternal("LLVMBuildGEP2",
[self.BuilderRef, self.TypeRef,
self.ValueRef, self.ValueRefPtr,
lltype.Unsigned, self.Str],
self.ValueRef,
compilation_info=info)
self.BuildGEP1D = rffi.llexternal("BuildGEP1D", #wrappers for common cases so can avoid rffi malloc each call
[self.BuilderRef, self.TypeRef,
self.ValueRef, self.ValueRef,
self.Str],
self.ValueRef,
compilation_info=info)
self.BuildGEP2D = rffi.llexternal("BuildGEP2D",
[self.BuilderRef, self.TypeRef,
self.ValueRef, self.ValueRef,
self.ValueRef, self.Str],
self.ValueRef,
compilation_info=info)
self.BuildGEP3D = rffi.llexternal("BuildGEP3D",
[self.BuilderRef, self.TypeRef,
self.ValueRef, self.ValueRef,
self.ValueRef, self.ValueRef,
self.Str],
self.ValueRef,
compilation_info=info)
self.BuildLoad = rffi.llexternal("LLVMBuildLoad2",
[self.BuilderRef, self.TypeRef,
self.ValueRef, self.Str],
self.ValueRef,
compilation_info=info)
self.BuildStore = rffi.llexternal("LLVMBuildStore",
[self.BuilderRef, self.ValueRef,
self.ValueRef],
self.ValueRef,
compilation_info=info)
self.BuildBitCast = rffi.llexternal("LLVMBuildBitCast",
[self.BuilderRef, self.ValueRef,
self.TypeRef, self.Str],
self.ValueRef,
compilation_info=info)
self.BuildIntToPtr = rffi.llexternal("LLVMBuildIntToPtr",
[self.BuilderRef, self.ValueRef,
self.TypeRef, self.Str],
self.ValueRef,
compilation_info=info)
self.BuildPtrToInt = rffi.llexternal("LLVMBuildPtrToInt",
[self.BuilderRef, self.ValueRef,
self.TypeRef, self.Str],
self.ValueRef,
compilation_info=info)
self.WriteBitcodeToFile = rffi.llexternal("LLVMWriteBitcodeToFile",
[self.ModuleRef, self.Str],
self.ValueRef,
compilation_info=info)
self.BuildAlloca = rffi.llexternal("LLVMBuildAlloca",
[self.BuilderRef, self.TypeRef,
self.Str],
self.ValueRef,
compilation_info=info)
self.PositionBuilderBefore = rffi.llexternal("LLVMPositionBuilderBefore",
[self.BuilderRef, self.ValueRef],
self.Void,
compilation_info=info)
self.GetFirstInstruction = rffi.llexternal("LLVMGetFirstInstruction",
[self.BasicBlockRef],
self.ValueRef,
compilation_info=info)
self.BuildMemCpy = rffi.llexternal("LLVMBuildMemCpy",
[self.BuilderRef, self.ValueRef,
lltype.Unsigned, self.ValueRef,
lltype.Unsigned, self.ValueRef],
self.ValueRef,
compilation_info=info)
self.CreatePassManager = rffi.llexternal("LLVMCreatePassManager",
[self.Void],
self.PassManagerRef,
compilation_info=info)
self.RunPassManager = rffi.llexternal("LLVMRunPassManager",
[self.PassManagerRef,
self.ModuleRef],
self.Bool,
compilation_info=info)
self.AddInstructionCombiningPass = rffi.llexternal("LLVMAddInstructionCombiningPass",
[self.PassManagerRef],
self.Void,
compilation_info=info)
self.AddReassociatePass = rffi.llexternal("LLVMAddReassociatePass",
[self.PassManagerRef],
self.Void,
compilation_info=info)
self.AddGVNPass = rffi.llexternal("LLVMAddGVNPass",
[self.PassManagerRef],
self.Void,
compilation_info=info)
self.AddCFGSimplificationPass = rffi.llexternal("LLVMAddCFGSimplificationPass",
[self.PassManagerRef],
self.Void,
compilation_info=info)
self.AddPromoteMemoryToRegisterPass = rffi.llexternal("LLVMAddPromoteMemoryToRegisterPass",
[self.PassManagerRef],
self.Void,
compilation_info=info)
self.AddPromoteMemoryToRegisterPass = rffi.llexternal("LLVMAddPromoteMemoryToRegisterPass",
[self.PassManagerRef],
self.Void,
compilation_info=info)
self.AddIndVarSimplifyPass = rffi.llexternal("LLVMAddIndVarSimplifyPass",
[self.PassManagerRef],
self.Void,
compilation_info=info)
self.AddScalarReplAggregatesPass = rffi.llexternal("LLVMAddScalarReplAggregatesPass",
[self.PassManagerRef],
self.Void,
compilation_info=info)
self.AddScalarReplAggregatesPass = rffi.llexternal("LLVMAddScalarReplAggregatesPass",
[self.PassManagerRef],
self.Void,
compilation_info=info)
self.GetSubtypes = rffi.llexternal("LLVMGetSubtypes",
[self.TypeRef, self.TypeRefPtr],
self.Void,
compilation_info=info)
self.DeleteBasicBlock = rffi.llexternal("LLVMDeleteBasicBlock",
[self.BasicBlockRef],
self.Void,
compilation_info=info)
self.BuildZExt = rffi.llexternal("LLVMBuildZExt",
[self.BuilderRef, self.ValueRef,
self.TypeRef, self.Str],
self.ValueRef,
compilation_info=info)
self.SizeOf = rffi.llexternal("GetSizeOf",
[self.TypeRef],
lltype.SignedLongLong,
compilation_info=info)
self.DeleteBasicBlock = rffi.llexternal("LLVMDeleteBasicBlock",
[self.BasicBlockRef],
lltype.Void,
compilation_info=info)
self.DisposeLLJIT = rffi.llexternal("LLVMOrcDisposeLLJIT",
[self.LLJITRef],
self.ErrorRef,
compilation_info=info)
self.GetErrorMessage = rffi.llexternal("LLVMGetErrorMessage",
[self.ErrorRef],
self.Str,
compilation_info=info)
self.FloatType = rffi.llexternal("LLVMDoubleTypeInContext",
[self.ContextRef],
self.TypeRef,
compilation_info=info)
self.SingleFloatType = rffi.llexternal("LLVMFloatTypeInContext",
[self.ContextRef],
self.TypeRef,
compilation_info=info)
self.ConstFloat = rffi.llexternal("LLVMConstReal",
[self.TypeRef, lltype.Float],
self.ValueRef,
compilation_info=info)
self.BuildFAdd = rffi.llexternal("LLVMBuildFAdd",
[self.BuilderRef, self.ValueRef,
self.ValueRef, self.Str],
self.ValueRef,
compilation_info=info)
self.PrintValue = rffi.llexternal("LLVMPrintValueToString",
[self.ValueRef],
self.Str,
compilation_info=info)
self.BuildSub = rffi.llexternal("LLVMBuildSub",
[self.BuilderRef, self.ValueRef,
self.ValueRef, self.Str],
self.ValueRef,
compilation_info=info)
self.BuildFSub = rffi.llexternal("LLVMBuildFSub",
[self.BuilderRef, self.ValueRef,
self.ValueRef, self.Str],
self.ValueRef,
compilation_info=info)
self.BuildMul = rffi.llexternal("LLVMBuildMul",
[self.BuilderRef, self.ValueRef,
self.ValueRef, self.Str],
self.ValueRef,
compilation_info=info)
self.BuildFMul = rffi.llexternal("LLVMBuildFMul",
[self.BuilderRef, self.ValueRef,
self.ValueRef, self.Str],
self.ValueRef,
compilation_info=info)
self.BuildFMul = rffi.llexternal("LLVMBuildFMul",
[self.BuilderRef, self.ValueRef,
self.ValueRef, self.Str],
self.ValueRef,
compilation_info=info)
self.BuildFDiv = rffi.llexternal("LLVMBuildFDiv",
[self.BuilderRef, self.ValueRef,
self.ValueRef, self.Str],
self.ValueRef,
compilation_info=info)
self.BuildAnd = rffi.llexternal("LLVMBuildAnd",
[self.BuilderRef, self.ValueRef,
self.ValueRef, self.Str],
self.ValueRef,
compilation_info=info)
self.BuildOr = rffi.llexternal("LLVMBuildOr",
[self.BuilderRef, self.ValueRef,
self.ValueRef, self.Str],
self.ValueRef,
compilation_info=info)
self.BuildXor = rffi.llexternal("LLVMBuildXor",
[self.BuilderRef, self.ValueRef,
self.ValueRef, self.Str],
self.ValueRef,
compilation_info=info)
self.BuildFNeg = rffi.llexternal("LLVMBuildFNeg",
[self.BuilderRef, self.ValueRef,
self.Str],
self.ValueRef,
compilation_info=info)
self.BuildLShl = rffi.llexternal("LLVMBuildShl",
[self.BuilderRef, self.ValueRef,
self.ValueRef, self.Str],
self.ValueRef,
compilation_info=info)
self.BuildURShl = rffi.llexternal("LLVMBuildLShr",
[self.BuilderRef, self.ValueRef,
self.ValueRef, self.Str],
self.ValueRef,
compilation_info=info)
self.BuildRShl = rffi.llexternal("LLVMBuildAShr",
[self.BuilderRef, self.ValueRef,
self.ValueRef, self.Str],
self.ValueRef,
compilation_info=info)
self.BuildSExt = rffi.llexternal("LLVMBuildSExt",
[self.BuilderRef, self.ValueRef,
self.TypeRef, self.Str],
self.ValueRef,
compilation_info=info)
self.SetJITEnums = rffi.llexternal("SetJITEnums",
[lltype.Ptr(self.JITEnums)],
self.Void,
compilation_info=info)
self.SetCmpEnums = rffi.llexternal("SetCmpEnums",
[lltype.Ptr(self.CmpEnums)],
self.Void,
compilation_info=info)
self.getResultElementType = rffi.llexternal("getResultElementType",
[self.ValueRef],
self.TypeRef,
compilation_info=info)
self.DumpValue = rffi.llexternal("LLVMDumpValue",
[self.ValueRef],
self.Void,
compilation_info=info)
self.removeIncomingValue = rffi.llexternal("removeIncomingValue",
[self.ValueRef,
self.BasicBlockRef],
self.ValueRef,
compilation_info=info)
self.removePredecessor = rffi.llexternal("removePredecessor",
[self.BasicBlockRef,
self.BasicBlockRef],
self.Void,
compilation_info=info)
self.getFirstNonPhi = rffi.llexternal("getFirstNonPhi",
[self.BasicBlockRef],
self.Void,
compilation_info=info)
self.splitBasicBlockAtPhi = rffi.llexternal("splitBasicBlockAtPhi",
[self.BasicBlockRef],
self.BasicBlockRef,
compilation_info=info)
self.getTerminator = rffi.llexternal("getTerminator",
[self.BasicBlockRef],
self.ValueRef,
compilation_info=info)
self.DumpModule = rffi.llexternal("LLVMDumpModule",
[self.ModuleRef],
self.Void,
compilation_info=info)
self.dumpBasicBlock = rffi.llexternal("dumpBasicBlock",
[self.ModuleRef],
self.Void,
compilation_info=info)
self.getIncomingValueForBlock = rffi.llexternal("getIncomingValueForBlock",
[self.ValueRef,
self.BasicBlockRef],
self.ValueRef,
compilation_info=info)
self.GetLastInstruction = rffi.llexternal("LLVMGetLastInstruction",
[self.BasicBlockRef],
self.ValueRef,
compilation_info=info)
self.BuildPtrDiff = rffi.llexternal("LLVMBuildPtrDiff",
[self.BuilderRef, | |
import argparse
import time
import pickle
import os
import json
import numpy as np
# import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_probability as tfp
from utils import plot_mnist, generate_init_inducing_points, import_rotated_mnist, \
print_trainable_vars, parse_opt_regime, compute_bias_variance_mean_estimators, \
make_checkpoint_folder, pandas_res_saver, latent_samples_SVGPVAE, latent_samples_VAE_full_train
from VAE_utils import mnistVAE, mnistCVAE, SVIGP_Hensman_decoder
from SVGPVAE_model import forward_pass_SVGPVAE, mnistSVGP, forward_pass_standard_VAE_rotated_mnist, \
batching_encode_SVGPVAE, batching_encode_SVGPVAE_full, \
bacthing_predict_SVGPVAE_rotated_mnist, predict_CVAE
from GPVAE_Casale_model import encode, casaleGP, forward_pass_Casale, predict_test_set_Casale, sort_train_data
from SVIGP_Hensman_model import SVIGP_Hensman, forward_pass_deep_SVIGP_Hensman, predict_deep_SVIGP_Hensman
# Module-level shorthand aliases for the TensorFlow Probability namespaces
# used throughout this script.
tfd = tfp.distributions  # probability distributions (e.g. Normal for ELBO terms)
tfk = tfp.math.psd_kernels  # positive semi-definite kernels for the GP components
def run_experiment_rotated_mnist_SVGPVAE(args, args_dict):
"""
Function with tensorflow graph and session for SVGPVAE experiments on rotated MNIST data.
For description of SVGPVAE see chapter 7 in SVGPVAE.tex
:param args:
:return:
"""
# define some constants
n = len(args.dataset)
N_train = n * 4050
N_eval = n * 640
N_test = n * 270
if args.save:
# Make a folder to save everything
extra = args.elbo + "_" + str(args.beta)
chkpnt_dir = make_checkpoint_folder(args.base_dir, args.expid, extra)
pic_folder = chkpnt_dir + "pics/"
res_file = chkpnt_dir + "res/ELBO_pandas"
res_file_GP = chkpnt_dir + "res/ELBO_GP_pandas"
if "SVGPVAE" in args.elbo:
res_file_VAE = chkpnt_dir + "res/ELBO_VAE_pandas"
print("\nCheckpoint Directory:\n" + str(chkpnt_dir) + "\n")
json.dump(args_dict, open(chkpnt_dir + "/args.json", "wt"))
# Init plots
if args.show_pics:
plt.ion()
graph = tf.Graph()
with graph.as_default():
# ====================== 1) import data ======================
# shuffled data or not
ending = args.dataset + ".p"
iterator, training_init_op, eval_init_op, test_init_op, train_data_dict, eval_data_dict, test_data_dict, \
eval_batch_size_placeholder, test_batch_size_placeholder = import_rotated_mnist(args.mnist_data_path,
ending, args.batch_size)
# get the batch
input_batch = iterator.get_next()
# ====================== 2) build ELBO graph ======================
# init VAE object
if args.elbo == "CVAE":
VAE = mnistCVAE(L=args.L)
else:
VAE = mnistVAE(L=args.L)
beta = tf.compat.v1.placeholder(dtype=tf.float64, shape=())
# placeholders
train_aux_data_placeholder = tf.compat.v1.placeholder(dtype=tf.float64, shape=(None, 2 + args.M))
train_images_placeholder = tf.compat.v1.placeholder(dtype=tf.float64, shape=(None, 28, 28, 1))
test_aux_data_placeholder = tf.compat.v1.placeholder(dtype=tf.float64, shape=(None, 2 + args.M))
test_images_placeholder = tf.compat.v1.placeholder(dtype=tf.float64, shape=(None, 28, 28, 1))
if "SVGPVAE" in args.elbo: # SVGPVAE
inducing_points_init = generate_init_inducing_points(args.mnist_data_path + 'train_data' + ending,
n=args.nr_inducing_points,
remove_test_angle=None,
PCA=args.PCA, M=args.M)
titsias = 'Titsias' in args.elbo
ip_joint = not args.ip_joint
GP_joint = not args.GP_joint
if args.ov_joint:
if args.PCA: # use PCA embeddings for initialization of object vectors
object_vectors_init = pickle.load(open(args.mnist_data_path +
'pca_ov_init{}.p'.format(args.dataset), 'rb'))
else: # initialize object vectors randomly
object_vectors_init = np.random.normal(0, 1.5,
len(args.dataset)*400*args.M).reshape(len(args.dataset)*400,
args.M)
else:
object_vectors_init = None
# init SVGP object
SVGP_ = mnistSVGP(titsias=titsias, fixed_inducing_points=ip_joint,
initial_inducing_points=inducing_points_init,
fixed_gp_params=GP_joint, object_vectors_init=object_vectors_init, name='main',
jitter=args.jitter, N_train=N_train,
L=args.L, K_obj_normalize=args.object_kernel_normalize)
# forward pass SVGPVAE
C_ma_placeholder = tf.compat.v1.placeholder(dtype=tf.float64, shape=())
lagrange_mult_placeholder = tf.compat.v1.placeholder(dtype=tf.float64, shape=())
alpha_placeholder = tf.compat.v1.placeholder(dtype=tf.float64, shape=())
elbo, recon_loss, KL_term, inside_elbo, ce_term, p_m, p_v, qnet_mu, qnet_var, recon_images, \
inside_elbo_recon, inside_elbo_kl, latent_samples, \
C_ma, lagrange_mult, mean_vectors = forward_pass_SVGPVAE(input_batch,
beta=beta,
vae=VAE,
svgp=SVGP_,
C_ma=C_ma_placeholder,
lagrange_mult=lagrange_mult_placeholder,
alpha=alpha_placeholder,
kappa=np.sqrt(args.kappa_squared),
clipping_qs=args.clip_qs,
GECO=args.GECO,
bias_analysis=args.bias_analysis)
# forward pass standard VAE (for training regime from CASALE: VAE-GP-joint)
recon_loss_VAE, KL_term_VAE, elbo_VAE, \
recon_images_VAE, qnet_mu_VAE, qnet_var_VAE, \
latent_samples_VAE = forward_pass_standard_VAE_rotated_mnist(input_batch,
vae=VAE)
elif args.elbo == "VAE" or args.elbo == "CVAE": # plain VAE or CVAE
CVAE = args.elbo == "CVAE"
recon_loss, KL_term, elbo, \
recon_images, qnet_mu, qnet_var, latent_samples = forward_pass_standard_VAE_rotated_mnist(input_batch,
vae=VAE,
CVAE=CVAE)
else:
raise ValueError
# test loss and predictions
if "SVGPVAE" in args.elbo:
train_encodings_means_placeholder = tf.compat.v1.placeholder(dtype=tf.float64, shape=(None, args.L))
train_encodings_vars_placeholder = tf.compat.v1.placeholder(dtype=tf.float64, shape=(None, args.L))
qnet_mu_train, qnet_var_train, _ = batching_encode_SVGPVAE(input_batch, vae=VAE,
clipping_qs=args.clip_qs)
recon_images_test, \
recon_loss_test = bacthing_predict_SVGPVAE_rotated_mnist(input_batch,
vae=VAE,
svgp=SVGP_,
qnet_mu=train_encodings_means_placeholder,
qnet_var=train_encodings_vars_placeholder,
aux_data_train=train_aux_data_placeholder)
# GP diagnostics
GP_l, GP_amp, GP_ov, GP_ip = SVGP_.variable_summary()
# bias analysis
if args.bias_analysis:
means, vars = batching_encode_SVGPVAE_full(train_images_placeholder,
vae=VAE, clipping_qs=args.clip_qs)
mean_vector_full_data = []
for l in range(args.L):
mean_vector_full_data.append(SVGP_.mean_vector_bias_analysis(index_points=train_aux_data_placeholder,
y=means[:, l], noise=vars[:, l]))
if args.save_latents:
if "SVGPVAE" in args.elbo:
latent_samples_full = latent_samples_SVGPVAE(train_images_placeholder, train_aux_data_placeholder,
vae=VAE, svgp=SVGP_, clipping_qs=args.clip_qs)
else:
latent_samples_full = latent_samples_VAE_full_train(train_images_placeholder,
vae=VAE, clipping_qs=args.clip_qs)
# conditional generation for CVAE
if args.elbo == "CVAE":
recon_images_test, recon_loss_test = predict_CVAE(images_train=train_images_placeholder,
images_test=test_images_placeholder,
aux_data_train=train_aux_data_placeholder,
aux_data_test=test_aux_data_placeholder,
vae=VAE, test_indices=test_data_dict['aux_data'][:, 0])
# ====================== 3) optimizer ops ======================
global_step = tf.Variable(0, name='global_step', trainable=False)
train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
lr = tf.compat.v1.placeholder(dtype=tf.float64, shape=())
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=lr)
if args.GECO: # minimizing GECO objective
gradients = tf.gradients(elbo, train_vars)
else: # minimizing negative elbo
gradients = tf.gradients(-elbo, train_vars)
optim_step = optimizer.apply_gradients(grads_and_vars=zip(gradients, train_vars),
global_step=global_step)
# ====================== 4) Pandas saver ======================
if args.save:
res_vars = [global_step,
elbo,
recon_loss,
KL_term,
tf.math.reduce_min(qnet_mu),
tf.math.reduce_max(qnet_mu),
tf.math.reduce_min(qnet_var),
tf.math.reduce_max(qnet_var),
qnet_var]
res_names = ["step",
"ELBO",
"recon loss",
"KL term",
"min qnet_mu",
"max qnet_mu",
"min qnet_var",
"max qnet_var",
"full qnet_var"]
if 'SVGPVAE' in args.elbo:
res_vars += [inside_elbo,
inside_elbo_recon,
inside_elbo_kl,
ce_term,
tf.math.reduce_min(p_m),
tf.math.reduce_max(p_m),
tf.math.reduce_min(p_v),
tf.math.reduce_max(p_v),
latent_samples,
C_ma,
lagrange_mult]
res_names += ["inside elbo",
"inside elbo recon",
"inside elbo KL",
"ce_term",
"min p_m",
"max p_m",
"min p_v",
"max p_v",
"latent_samples",
"C_ma",
"lagrange_mult"]
res_vars_VAE = [global_step,
elbo_VAE,
recon_loss_VAE,
KL_term_VAE,
tf.math.reduce_min(qnet_mu_VAE),
tf.math.reduce_max(qnet_mu_VAE),
tf.math.reduce_min(qnet_var_VAE),
tf.math.reduce_max(qnet_var_VAE),
latent_samples_VAE]
res_names_VAE = ["step",
"ELBO",
"recon loss",
"KL term",
"min qnet_mu",
"max qnet_mu",
"min qnet_var",
"max qnet_var",
"latent_samples"]
res_vars_GP = [GP_l,
GP_amp,
GP_ov,
GP_ip]
res_names_GP = ['length scale', 'amplitude', 'object vectors', 'inducing points']
res_saver_VAE = pandas_res_saver(res_file_VAE, res_names_VAE)
res_saver_GP = pandas_res_saver(res_file_GP, res_names_GP)
res_saver = pandas_res_saver(res_file, res_names)
# ====================== 5) print and init trainable params ======================
print_trainable_vars(train_vars)
init_op = tf.global_variables_initializer()
# ====================== 6) saver and GPU ======================
if args.save_model_weights:
saver = tf.compat.v1.train.Saver(max_to_keep=3)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.ram)
# ====================== 7) tf.session ======================
if "SVGPVAE" in args.elbo:
nr_epochs, training_regime = parse_opt_regime(args.opt_regime)
else:
nr_epochs = args.nr_epochs
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
sess.run(init_op)
# training loop
first_step = True # switch for initialization of GECO algorithm
C_ma_ = 0.0
lagrange_mult_ = 1.0
start_time = time.time()
cgen_test_set_MSE = []
for epoch in range(nr_epochs):
# 7.1) train for one epoch
sess.run(training_init_op)
elbos, losses = [], []
start_time_epoch = time.time()
if args.bias_analysis:
mean_vectors_arr = []
while True:
try:
if args.GECO and "SVGPVAE" in args.elbo and training_regime[epoch] != 'VAE':
if first_step:
alpha = 0.0
else:
alpha = args.alpha
_, g_s_, elbo_, C_ma_, lagrange_mult_, recon_loss_, mean_vectors_ = sess.run([optim_step, global_step,
elbo, C_ma, lagrange_mult,
recon_loss, mean_vectors],
{beta: args.beta, lr: args.lr,
alpha_placeholder: alpha,
C_ma_placeholder: C_ma_,
lagrange_mult_placeholder: lagrange_mult_})
if args.bias_analysis:
mean_vectors_arr.append(mean_vectors_)
elif args.elbo == "VAE" or args.elbo == "CVAE":
_, g_s_, elbo_, recon_loss_ = sess.run(
[optim_step, global_step, elbo, recon_loss],
{beta: args.beta, lr: args.lr})
else:
_, g_s_, elbo_, recon_loss_ = sess.run([optim_step, global_step, elbo, recon_loss],
{beta: args.beta, lr: args.lr,
alpha_placeholder: args.alpha,
C_ma_placeholder: C_ma_,
lagrange_mult_placeholder: lagrange_mult_})
elbos.append(elbo_)
losses.append(recon_loss_)
first_step = False # switch for initizalition of GECO algorithm
except tf.errors.OutOfRangeError:
if args.bias_analysis:
mean_vector_full_data_ = sess.run(mean_vector_full_data,
{train_images_placeholder: train_data_dict['images'],
train_aux_data_placeholder: train_data_dict['aux_data']})
bias = compute_bias_variance_mean_estimators(mean_vectors_arr, mean_vector_full_data_)
print("Bias for epoch {}: {}".format(epoch, bias))
if (epoch + 1) % 10 == 0:
regime = training_regime[epoch] if "SVGPVAE" in args.elbo else "VAE"
print('Epoch {}, opt regime {}, mean ELBO per batch: {}'.format(epoch, regime,
np.mean(elbos)))
MSE = np.sum(losses) / N_train
print('MSE loss on train set for epoch {} : {}'.format(epoch, MSE))
end_time_epoch = time.time()
print("Time elapsed for epoch {}, opt regime {}: {}".format(epoch,
regime,
end_time_epoch - start_time_epoch))
break
# 7.2) calculate loss on eval set
if args.save and (epoch + 1) % 10 == 0 and "SVGPVAE" in args.elbo:
losses = []
sess.run(eval_init_op, {eval_batch_size_placeholder: args.batch_size})
while True:
try:
recon_loss_ = sess.run(recon_loss, {beta: args.beta, lr: args.lr,
alpha_placeholder: args.alpha,
C_ma_placeholder: C_ma_,
lagrange_mult_placeholder: lagrange_mult_})
losses.append(recon_loss_)
except tf.errors.OutOfRangeError:
MSE = np.sum(losses) / N_eval
print('MSE loss on eval set for epoch {} : {}'.format(epoch, MSE))
break
# 7.3) save metrics to Pandas df for model diagnostics
if args.save and (epoch + 1) % 10 == 0:
if args.test_set_metrics:
# sess.run(test_init_op, {test_batch_size_placeholder: N_test}) # see [update, 7.7.] above
sess.run(test_init_op, {test_batch_size_placeholder: args.batch_size})
else:
# sess.run(eval_init_op, {eval_batch_size_placeholder: N_eval}) # see [update, 7.7.] above
sess.run(eval_init_op, {eval_batch_size_placeholder: args.batch_size})
if "SVGPVAE" in args.elbo:
# save elbo metrics depending on the type of forward pass (plain VAE vs SVGPVAE)
if training_regime[epoch] == 'VAE':
new_res = sess.run(res_vars_VAE, {beta: args.beta})
res_saver_VAE(new_res, 1)
else:
new_res = sess.run(res_vars, {beta: args.beta,
alpha_placeholder: args.alpha,
C_ma_placeholder: C_ma_,
lagrange_mult_placeholder: lagrange_mult_})
res_saver(new_res, 1)
# save GP params
new_res_GP = sess.run(res_vars_GP, {beta: args.beta,
alpha_placeholder: args.alpha,
C_ma_placeholder: C_ma_,
lagrange_mult_placeholder: lagrange_mult_})
res_saver_GP(new_res_GP, 1)
else:
new_res = sess.run(res_vars, {beta: args.beta})
res_saver(new_res, 1)
# 7.4) calculate loss on test set and visualize reconstructed images
if (epoch + 1) % 10 == 0:
losses, recon_images_arr = [], []
sess.run(test_init_op, {test_batch_size_placeholder: args.batch_size})
# test set: reconstruction
while True:
try:
if "SVGPVAE" in args.elbo:
recon_loss_, recon_images_ = sess.run([recon_loss, recon_images],
{beta: args.beta,
alpha_placeholder: args.alpha,
C_ma_placeholder: C_ma_,
lagrange_mult_placeholder: lagrange_mult_})
else:
recon_loss_, recon_images_ = sess.run([recon_loss, recon_images],
{beta: args.beta})
losses.append(recon_loss_)
recon_images_arr.append(recon_images_)
except tf.errors.OutOfRangeError:
MSE = np.sum(losses) / N_test
| |
value=None):
"""Corresponds to IDD field `Drift Loss Fraction`"""
self["Drift Loss Fraction"] = value
@property
def blowdown_concentration_ratio(self):
"""field `Blowdown Concentration Ratio`
| Characterizes the rate of blowdown in the evaporative cooler.
| Blowdown is water intentionally drained from the cooler in order to offset the build
| up of solids in the water that would otherwise occur because of evaporation.
| Ratio of solids in the blowdown water to solids in the make up water.
| A typical value is 3. If left blank then there is no blowdown.
| value >= 2.0
Args:
value (float): value for IDD Field `Blowdown Concentration Ratio`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `blowdown_concentration_ratio` or None if not set
"""
return self["Blowdown Concentration Ratio"]
@blowdown_concentration_ratio.setter
def blowdown_concentration_ratio(self, value=None):
"""Corresponds to IDD field `Blowdown Concentration Ratio`"""
self["Blowdown Concentration Ratio"] = value
@property
def evaporative_operation_minimum_limit_secondary_air_drybulb_temperature(
self):
"""field `Evaporative Operation Minimum Limit Secondary Air Drybulb
Temperature`
| This input field value defines the secondary air inlet node drybulb temperature
| limits in degreeCelsius. When the secondary side entering air dry bulb temperature
| drops below this limit, then the evaporative cooler operation mode changes to dry
| heat exchanger. Users specify their own limits. If this field is left blank, then
| there is no drybulb temperature lower limit for evaporative cooler operation. If
| operating range control is desired then this input field and the next two input
| fields should be specified or all the three should be left blank or left out. If
| no minimum drybulb temperature limit is desired while there are maximum drybulb
| and wetbulb temperature limits then specify very low minimum temperature limit
| value (e.g. -99.0C).
Args:
value (float): value for IDD Field `Evaporative Operation Minimum Limit Secondary Air Drybulb Temperature`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `evaporative_operation_minimum_limit_secondary_air_drybulb_temperature` or None if not set
"""
return self[
"Evaporative Operation Minimum Limit Secondary Air Drybulb Temperature"]
@evaporative_operation_minimum_limit_secondary_air_drybulb_temperature.setter
def evaporative_operation_minimum_limit_secondary_air_drybulb_temperature(
self,
value=None):
"""Corresponds to IDD field `Evaporative Operation Minimum Limit
Secondary Air Drybulb Temperature`"""
self[
"Evaporative Operation Minimum Limit Secondary Air Drybulb Temperature"] = value
@property
def evaporative_operation_maximum_limit_outdoor_wetbulb_temperature(self):
"""field `Evaporative Operation Maximum Limit Outdoor Wetbulb
Temperature`
| This input field value defines the secondary air inlet node wetbulb temperature
| limits in degree Celsius. When the secondary side entering air wet bulb temperature
| exceeds this limit, then the evaporative cooler urns off and does not attempt to do
| any cooling. If this field is left blank, then there is no wetbulb temperature
| upper limit for evaporative cooler wet operation mode. If this input field is left
| blank then, the previous and the next input fields should also be left blank. If no
| maximum wetbulb temperature limits is desired while there are minimum drybulb and
| maximum drybulb upper temperature limits then specify very high maximum wetbulb
| temperature limit value (e.g. 99.0 C).
Args:
value (float): value for IDD Field `Evaporative Operation Maximum Limit Outdoor Wetbulb Temperature`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `evaporative_operation_maximum_limit_outdoor_wetbulb_temperature` or None if not set
"""
return self[
"Evaporative Operation Maximum Limit Outdoor Wetbulb Temperature"]
@evaporative_operation_maximum_limit_outdoor_wetbulb_temperature.setter
def evaporative_operation_maximum_limit_outdoor_wetbulb_temperature(
self,
value=None):
"""Corresponds to IDD field `Evaporative Operation Maximum Limit
Outdoor Wetbulb Temperature`"""
self[
"Evaporative Operation Maximum Limit Outdoor Wetbulb Temperature"] = value
@property
def dry_operation_maximum_limit_outdoor_drybulb_temperature(self):
"""field `Dry Operation Maximum Limit Outdoor Drybulb Temperature`
| This input field value defines the secondary air inlet node drybulb temperature
| limits in degree Celsius. When the secondary side entering air drybulb temperature
| exceeds this limit, then the evaporative cooler will not run in dry operation mode
| or may be turned off depending on its wetbulb temperature. If this field is left
| blank, then there is no drybulb temperature maximum limit for evaporative cooler
| operation. If this input field is left blank then, the previous and the next input
| fields should also be left blank. If no maximum drybulb temperature limit is
| desired while there are minimum drybulb and maximum wetbulb upper temperature
| limits then specify very high maximum drybulb temperature limit value (e.g. 99.0 C).
Args:
value (float): value for IDD Field `Dry Operation Maximum Limit Outdoor Drybulb Temperature`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `dry_operation_maximum_limit_outdoor_drybulb_temperature` or None if not set
"""
return self["Dry Operation Maximum Limit Outdoor Drybulb Temperature"]
@dry_operation_maximum_limit_outdoor_drybulb_temperature.setter
def dry_operation_maximum_limit_outdoor_drybulb_temperature(
self,
value=None):
"""Corresponds to IDD field `Dry Operation Maximum Limit Outdoor
Drybulb Temperature`"""
self["Dry Operation Maximum Limit Outdoor Drybulb Temperature"] = value
class EvaporativeCoolerDirectResearchSpecial(DataObject):
""" Corresponds to IDD object `EvaporativeCooler:Direct:ResearchSpecial`
Direct evaporative cooler with user-specified effectiveness (can represent rigid pad
or similar media), and recirculating water pump, and secondary air fan. This model is
controlled to meet the primary air outlet temperature setpoint.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'availability schedule name',
{'name': u'Availability Schedule Name',
'pyname': u'availability_schedule_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'cooler design effectiveness',
{'name': u'Cooler Design Effectiveness',
'pyname': u'cooler_design_effectiveness',
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': u'real'}),
(u'effectiveness flow ratio modifier curve name',
{'name': u'Effectiveness Flow Ratio Modifier Curve Name',
'pyname': u'effectiveness_flow_ratio_modifier_curve_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'primary air design flow rate',
{'name': u'Primary Air Design Flow Rate',
'pyname': u'primary_air_design_flow_rate',
'minimum>': 0.0,
'required-field': False,
'autosizable': True,
'autocalculatable': False,
'type': u'real',
'unit': u'm3/s'}),
(u'recirculating water pump design power',
{'name': u'Recirculating Water Pump Design Power',
'pyname': u'recirculating_water_pump_design_power',
'required-field': False,
'autosizable': True,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'W'}),
(u'water pump power sizing factor',
{'name': u'Water Pump Power Sizing Factor',
'pyname': u'water_pump_power_sizing_factor',
'default': 90.0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'W/(m3/s)'}),
(u'water pump power modifier curve name',
{'name': u'Water Pump Power Modifier Curve Name',
'pyname': u'water_pump_power_modifier_curve_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'air inlet node name',
{'name': u'Air Inlet Node Name',
'pyname': u'air_inlet_node_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'air outlet node name',
{'name': u'Air Outlet Node Name',
'pyname': u'air_outlet_node_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'sensor node name',
{'name': u'Sensor Node Name',
'pyname': u'sensor_node_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'node'}),
(u'water supply storage tank name',
{'name': u'Water Supply Storage Tank Name',
'pyname': u'water_supply_storage_tank_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'drift loss fraction',
{'name': u'Drift Loss Fraction',
'pyname': u'drift_loss_fraction',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': u'real'}),
(u'blowdown concentration ratio',
{'name': u'Blowdown Concentration Ratio',
'pyname': u'blowdown_concentration_ratio',
'required-field': False,
'autosizable': False,
'minimum': 2.0,
'autocalculatable': False,
'type': u'real'}),
(u'evaporative operation minimum drybulb temperature',
{'name': u'Evaporative Operation Minimum Drybulb Temperature',
'pyname': u'evaporative_operation_minimum_drybulb_temperature',
'required-field': False,
'autosizable': False,
'minimum': -99.0,
'autocalculatable': False,
'type': u'real'}),
(u'evaporative operation maximum limit wetbulb temperature',
{'name': u'Evaporative Operation Maximum Limit Wetbulb Temperature',
'pyname': u'evaporative_operation_maximum_limit_wetbulb_temperature',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'evaporative operation maximum limit drybulb temperature',
{'name': u'Evaporative Operation Maximum Limit Drybulb Temperature',
'pyname': u'evaporative_operation_maximum_limit_drybulb_temperature',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'})]),
'format': None,
'group': u'Evaporative Coolers',
'min-fields': 11,
'name': u'EvaporativeCooler:Direct:ResearchSpecial',
'pyname': u'EvaporativeCoolerDirectResearchSpecial',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def availability_schedule_name(self):
"""field `Availability Schedule Name`
| Availability schedule name for this system. Schedule value > 0 means the system is available.
| If this field is blank, the system is always available.
Args:
value (str): value for IDD Field `Availability Schedule Name`
Raises:
ValueError: if `value` is | |
word[6] == "n" :
toGuess = toGuess[:6] + "n" + toGuess[7:]
if word[1] != "N" and word[1] != "n" and word[2] != "N" and word[2] != "n" and word[3] != "N" and word[3] != "n" and word[4] != "N" and word[4] != "n" and word[5] != "N" and word[5] != "n" and word[6] != "N" and word[6] != "n" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "n" + ", "
if guessChar == "O" or guessChar == "o" :
if word[1] == "O" or word[1] == "o" :
toGuess = toGuess[:1] + "o" + toGuess[2:]
if word[2] == "O" or word[2] == "o" :
toGuess = toGuess[:2] + "o" + toGuess[3:]
if word[3] == "O" or word[3] == "o" :
toGuess = toGuess[:3] + "o" + toGuess[4:]
if word[4] == "O" or word[4] == "o" :
toGuess = toGuess[:4] + "o" + toGuess[5:]
if word[5] == "O" or word[5] == "o" :
toGuess = toGuess[:5] + "o" + toGuess[6:]
if word[6] == "O" or word[6] == "o" :
toGuess = toGuess[:6] + "o" + toGuess[7:]
if word[1] != "O" and word[1] != "o" and word[2] != "O" and word[2] != "o" and word[3] != "O" and word[3] != "o" and word[4] != "O" and word[4] != "o" and word[5] != "O" and word[5] != "o" and word[6] != "O" and word[6] != "o" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "o" + ", "
if guessChar == "P" or guessChar == "p" :
if word[1] == "P" or word[1] == "p" :
toGuess = toGuess[:1] + "p" + toGuess[2:]
if word[2] == "P" or word[2] == "p" :
toGuess = toGuess[:2] + "p" + toGuess[3:]
if word[3] == "P" or word[3] == "p" :
toGuess = toGuess[:3] + "p" + toGuess[4:]
if word[4] == "P" or word[4] == "p" :
toGuess = toGuess[:4] + "p" + toGuess[5:]
if word[5] == "P" or word[5] == "p" :
toGuess = toGuess[:5] + "p" + toGuess[6:]
if word[6] == "P" or word[6] == "p" :
toGuess = toGuess[:6] + "p" + toGuess[7:]
if word[1] != "P" and word[1] != "p" and word[2] != "P" and word[2] != "p" and word[3] != "P" and word[3] != "p" and word[4] != "P" and word[4] != "p" and word[5] != "P" and word[5] != "p" and word[6] != "P" and word[6] != "p" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "p" + ", "
if guessChar == "Q" or guessChar == "q" :
if word[1] == "Q" or word[1] == "q" :
toGuess = toGuess[:1] + "q" + toGuess[2:]
if word[2] == "Q" or word[2] == "q" :
toGuess = toGuess[:2] + "q" + toGuess[3:]
if word[3] == "Q" or word[3] == "q" :
toGuess = toGuess[:3] + "q" + toGuess[4:]
if word[4] == "Q" or word[4] == "q" :
toGuess = toGuess[:4] + "q" + toGuess[5:]
if word[5] == "Q" or word[5] == "q" :
toGuess = toGuess[:5] + "q" + toGuess[6:]
if word[6] == "Q" or word[6] == "q" :
toGuess = toGuess[:6] + "q" + toGuess[7:]
if word[1] != "Q" and word[1] != "q" and word[2] != "Q" and word[2] != "q" and word[3] != "Q" and word[3] != "q" and word[4] != "Q" and word[4] != "q" and word[5] != "Q" and word[5] != "q" and word[6] != "Q" and word[6] != "q" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "q" + ", "
if guessChar == "R" or guessChar == "r" :
if word[1] == "R" or word[1] == "r" :
toGuess = toGuess[:1] + "r" + toGuess[2:]
if word[2] == "R" or word[2] == "r" :
toGuess = toGuess[:2] + "r" + toGuess[3:]
if word[3] == "R" or word[3] == "r" :
toGuess = toGuess[:3] + "r" + toGuess[4:]
if word[4] == "R" or word[4] == "r" :
toGuess = toGuess[:4] + "r" + toGuess[5:]
if word[5] == "R" or word[5] == "r" :
toGuess = toGuess[:5] + "r" + toGuess[6:]
if word[6] == "R" or word[6] == "r" :
toGuess = toGuess[:6] + "r" + toGuess[7:]
if word[1] != "R" and word[1] != "r" and word[2] != "R" and word[2] != "r" and word[3] != "R" and word[3] != "r" and word[4] != "R" and word[4] != "r" and word[5] != "R" and word[5] != "r" and word[6] != "R" and word[6] != "r" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "r" + ", "
if guessChar == "S" or guessChar == "s" :
if word[1] == "S" or word[1] == "s" :
toGuess = toGuess[:1] + "s" + toGuess[2:]
if word[2] == "S" or word[2] == "s" :
toGuess = toGuess[:2] + "s" + toGuess[3:]
if word[3] == "S" or word[3] == "s" :
toGuess = toGuess[:3] + "s" + toGuess[4:]
if word[4] == "S" or word[4] == "s" :
toGuess = toGuess[:4] + "s" + toGuess[5:]
if word[5] == "S" or word[5] == "s" :
toGuess = toGuess[:5] + "s" + toGuess[6:]
if word[6] == "S" or word[6] == "s" :
toGuess = toGuess[:6] + "s" + toGuess[7:]
if word[1] != "S" and word[1] != "s" and word[2] != "S" and word[2] != "s" and word[3] != "S" and word[3] != "s" and word[4] != "S" and word[4] != "s" and word[5] != "S" and word[5] != "s" and word[6] != "S" and word[6] != "s" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "s" + ", "
if guessChar == "T" or guessChar == "t" :
if word[1] == "T" or word[1] == "t" :
toGuess = toGuess[:1] + "t" + toGuess[2:]
if word[2] == "T" or word[2] == "t" :
toGuess = toGuess[:2] + "t" + toGuess[3:]
if word[3] == "T" or word[3] == "t" :
toGuess = toGuess[:3] + "t" + toGuess[4:]
if word[4] == "T" or word[4] == "t" :
toGuess = toGuess[:4] + "t" + toGuess[5:]
if word[5] == "T" or word[5] == "t" :
toGuess = toGuess[:5] + "t" + toGuess[6:]
if word[6] == "T" or word[6] == "t" :
toGuess = toGuess[:6] + "t" + toGuess[7:]
if word[1] != "T" and word[1] != "t" and word[2] != "T" and word[2] != "t" and word[3] != "T" and word[3] != "t" and word[4] != "T" and word[4] != "t" and word[5] != "T" and word[5] != "t" and word[6] != "T" and word[6] != "t" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "t" + ", "
if guessChar == "U" or guessChar == "u" :
if word[1] == "U" or word[1] == "u" :
toGuess = toGuess[:1] + "u" + toGuess[2:]
if word[2] == "U" or word[2] == "u" :
toGuess = toGuess[:2] + "u" + toGuess[3:]
if word[3] == "U" or word[3] == "u" :
toGuess = toGuess[:3] + "u" + toGuess[4:]
if word[4] == "U" or word[4] == "u" :
toGuess = toGuess[:4] + "u" + toGuess[5:]
if word[5] == "U" or word[5] == "u" :
toGuess = toGuess[:5] + "u" + toGuess[6:]
if word[6] == "U" or word[6] == "u" :
toGuess = toGuess[:6] + "u" + toGuess[7:]
if word[1] != "U" and word[1] != "u" and word[2] != "U" and word[2] != "u" and word[3] != "U" and word[3] != "u" and word[4] != "U" and word[4] != "u" and word[5] != "U" and word[5] != "u" and word[6] != "U" and word[6] != "u" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "u" + ", "
if guessChar == "V" or guessChar == "v" :
if word[1] == "V" or word[1] == "v" :
toGuess = toGuess[:1] + "v" + toGuess[2:]
if word[2] == "V" or word[2] == "v" :
toGuess = toGuess[:2] + "v" + toGuess[3:]
if word[3] == "V" or word[3] == "v" :
toGuess = toGuess[:3] + "v" + toGuess[4:]
if word[4] == "V" or word[4] == "v" :
toGuess = toGuess[:4] + "v" + toGuess[5:]
if word[5] == "V" or word[5] == "v" :
toGuess = toGuess[:5] + "v" + toGuess[6:]
if word[6] == "V" or word[6] == "v" :
toGuess = toGuess[:6] + "v" + toGuess[7:]
if word[1] != "V" and word[1] != "v" and word[2] != "V" and word[2] != "v" and word[3] != "V" and word[3] != "v" and word[4] != "V" and word[4] != "v" and word[5] != "V" and word[5] != "v" and word[6] != "V" and word[6] != "v" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "v" + ", "
if guessChar == "W" | |
(120.4679, 0.48156900000000002),
(40.980319999999999, 0.34456520000000002)]),
('S',
[(786.28520000000003, 0.0024518629999999999),
(186.887, 0.032595789999999999),
(60.009349999999998, 0.1238242),
(22.25883, -0.043598900000000003),
(8.8851490000000002, -0.61771810000000005),
(3.6092110000000002, -0.44328230000000002)]),
('P',
[(786.28520000000003, 0.0040395300000000004),
(186.887, 0.031225699999999999),
(60.009349999999998, 0.1349833),
(22.25883, 0.34247929999999999),
(8.8851490000000002, 0.46231129999999998),
(3.6092110000000002, 0.21775240000000001)]),
('S',
[(29.84355, -0.002586302),
(9.5423829999999992, 0.071884240000000002),
(4.0567900000000003, 0.25032599999999999),
(1.7047030000000001, -0.29910029999999999),
(0.70623400000000003, -0.74468179999999995),
(0.27953600000000001, -0.17997759999999999)]),
('P',
[(29.84355, -0.0060966520000000001),
(9.5423829999999992, -0.026288840000000001),
(4.0567900000000003, 0.050910009999999999),
(1.7047030000000001, 0.37980969999999997),
(0.70623400000000003, 0.51708829999999995),
(0.27953600000000001, 0.18297720000000001)]),
('S',
[(1.065609, 0.064829780000000004),
(0.42593300000000001, 0.32537559999999999),
(0.076319999999999999, -1.170806)]),
('P',
[(1.065609, -0.29384399999999999),
(0.42593300000000001, 0.092353229999999994),
(0.076319999999999999, 0.98479300000000003)]),
('S', [(0.029593999999999999, 1.0)]),
('P', [(0.029593999999999999, 1.0)]),
('D',
[(11.14701, 0.087476719999999994),
(2.821043, 0.3795635),
(0.81962000000000002, 0.71803930000000005)]),
('D', [(0.221468, 1.0)]),
('F', [(0.80000000000000004, 1.0)])],
22: [('S',
[(43152.949999999997, 0.0017918719999999999),
(6479.5709999999999, 0.013723920000000001),
(1475.675, 0.067628300000000002),
(415.69909999999999, 0.23376420000000001),
(133.00059999999999, 0.48106959999999999),
(45.272219999999997, 0.34622799999999998)]),
('S',
[(874.68259999999998, 0.0024310080000000001),
(207.9785, 0.032330270000000001),
(66.879180000000005, 0.124252),
(24.873470000000001, -0.039039049999999999),
(9.9684410000000003, -0.61717889999999997),
(4.0638259999999997, -0.44730969999999998)]),
('P',
[(874.68259999999998, 0.0040176789999999997),
(207.9785, 0.03113966),
(66.879180000000005, 0.13490769999999999),
(24.873470000000001, 0.34316720000000001),
(9.9684410000000003, 0.46257599999999999),
(4.0638259999999997, 0.21546029999999999)]),
('S',
[(33.643630000000002, -0.0029403580000000001),
(10.87565, 0.071631029999999998),
(4.6282249999999996, 0.25289149999999999),
(1.950126, -0.29664010000000002),
(0.80945199999999995, -0.74322149999999998),
(0.32047399999999998, -0.18535199999999999)]),
('P',
[(33.643630000000002, -0.0063116200000000004),
(10.87565, -0.026976380000000001),
(4.6282249999999996, 0.053168470000000002),
(1.950126, 0.38455489999999998),
(0.80945199999999995, 0.51276619999999995),
(0.32047399999999998, 0.18111350000000001)]),
('S',
[(1.224148, 0.063514650000000006),
(0.484263, 0.31514039999999999),
(0.084096000000000004, -1.162595)]),
('P',
[(1.224148, -0.21120700000000001),
(0.484263, 0.077719979999999994),
(0.084096000000000004, 0.98982139999999996)]),
('S', [(0.032036000000000002, 1.0)]),
('P', [(0.032036000000000002, 1.0)]),
('D',
[(13.690849999999999, 0.085894180000000001),
(3.5131540000000001, 0.3784671),
(1.0404340000000001, 0.71612390000000004)]),
('D', [(0.28696199999999999, 1.0)]),
('F', [(0.80000000000000004, 1.0)])],
23: [('S',
[(47354.330000000002, 0.0017845129999999999),
(7110.7870000000003, 0.013667540000000001),
(1619.5909999999999, 0.067361219999999999),
(456.33789999999999, 0.23305519999999999),
(146.06059999999999, 0.48063159999999999),
(49.757910000000003, 0.34748020000000002)]),
('S',
[(968.14840000000004, 0.0024105989999999998),
(230.28210000000001, 0.032072429999999999),
(74.145910000000001, 0.1245942),
(27.641069999999999, -0.034821770000000002),
(11.114750000000001, -0.61673739999999999),
(4.543113, -0.45098440000000001)]),
('P',
[(968.14840000000004, 0.0039950050000000003),
(230.28210000000001, 0.03104061),
(74.145910000000001, 0.1347747),
(27.641069999999999, 0.34372789999999998),
(11.114750000000001, 0.46287590000000001),
(4.543113, 0.21355469999999999)]),
('S',
[(37.640500000000003, -0.003233199),
(12.28238, 0.07130744),
(5.2333660000000002, 0.254382),
(2.2089500000000002, -0.2933887),
(0.91788000000000003, -0.74156949999999999),
(0.36341200000000001, -0.190941)]),
('P',
[(37.640500000000003, -0.0064940559999999998),
(12.28238, -0.027534530000000002),
(5.2333660000000002, 0.055162839999999998),
(2.2089500000000002, 0.38796720000000001),
(0.91788000000000003, 0.50902579999999997),
(0.36341200000000001, 0.18038399999999999)]),
('S',
[(1.392781, 0.061397029999999998),
(0.54391299999999998, 0.30611300000000002),
(0.091476000000000002, -1.15489)]),
('P',
[(1.392781, -0.1891265),
(0.54391299999999998, 0.080054529999999999),
(0.091476000000000002, 0.9877399)]),
('S', [(0.034312000000000002, 1.0)]),
('P', [(0.034312000000000002, 1.0)]),
('D',
[(16.050249999999998, 0.085998989999999997),
(4.1600630000000001, 0.38029960000000002),
(1.2432650000000001, 0.71276589999999995)]),
('D', [(0.344277, 1.0)]),
('F', [(0.80000000000000004, 1.0)])],
24: [('S',
[(51789.809999999998, 0.0017761820000000001),
(7776.8490000000002, 0.01360476),
(1771.385, 0.067069249999999997),
(499.15879999999999, 0.2323104),
(159.79820000000001, 0.48024099999999997),
(54.470210000000002, 0.3487653)]),
('S',
[(1064.328, 0.0023996690000000001),
(253.21379999999999, 0.031948860000000003),
(81.60924, 0.1250868),
(30.481929999999998, -0.032218660000000003),
(12.29439, -0.61722840000000001),
(5.0377219999999996, -0.45259359999999998)]),
('P',
[(1064.328, 0.0039869969999999999),
(253.21379999999999, 0.03104662),
(81.60924, 0.1350518),
(30.481929999999998, 0.34488649999999998),
(12.29439, 0.46285710000000002),
(5.0377219999999996, 0.2110426)]),
('S',
[(41.562910000000002, -0.003454216),
(13.676270000000001, 0.072184280000000003),
(5.8443899999999998, 0.25448199999999999),
(2.4716089999999999, -0.29345339999999998),
(1.028308, -0.73854549999999997),
(0.40725, -0.19471569999999999)]),
('P',
[(41.562910000000002, -0.006722497),
(13.676270000000001, -0.02806471),
(5.8443899999999998, 0.05820028),
(2.4716089999999999, 0.39169880000000001),
(1.028308, 0.50478230000000002),
(0.40725, 0.17902899999999999)]),
('S',
[(1.571464, 0.058922189999999999),
(0.60558000000000001, 0.29760550000000002),
(0.098560999999999996, -1.1475059999999999)]),
('P',
[(1.571464, -0.19300999999999999),
(0.60558000000000001, 0.096056199999999994),
(0.098560999999999996, 0.98176090000000005)]),
('S', [(0.036458999999999998, 1.0)]),
('P', [(0.036458999999999998, 1.0)]),
('D',
[(18.4193, 0.086508160000000001),
(4.8126610000000003, 0.38266990000000001),
(1.446447, 0.70937720000000004)]),
('D', [(0.40041300000000002, 1.0)]),
('F', [(0.80000000000000004, 1.0)])],
25: [('S',
[(56347.139999999999, 0.00177158),
(8460.9429999999993, 0.013570810000000001),
(1927.325, 0.066906049999999995),
(543.23429999999996, 0.23185410000000001),
(173.9905, 0.47990460000000001),
(59.360050000000001, 0.34957369999999999)]),
('S',
[(1165.412, 0.0023887510000000002),
(277.32760000000002, 0.031817079999999998),
(89.47278, 0.125467),
(33.482559999999999, -0.02955431),
(13.540369999999999, -0.61751599999999995),
(5.5579720000000004, -0.45444580000000001)]),
('P',
[(1165.412, 0.0039773179999999997),
(277.32760000000002, 0.031031119999999999),
(89.47278, 0.13518939999999999),
(33.482559999999999, 0.34573870000000001),
(13.540369999999999, 0.46292050000000001),
(5.5579720000000004, 0.2090592)]),
('S',
[(45.835320000000003, -0.0036658559999999999),
(15.18777, 0.072319709999999995),
(6.5007099999999998, 0.25444860000000002),
(2.7515830000000001, -0.29103800000000002),
(1.1454040000000001, -0.73598600000000003),
(0.45368700000000001, -0.19976169999999999)]),
('P',
[(45.835320000000003, -0.0068875780000000001),
(15.18777, -0.028468159999999999),
(6.5007099999999998, 0.060318320000000002),
(2.7515830000000001, 0.39389610000000003),
(1.1454040000000001, 0.50137690000000001),
(0.45368700000000001, 0.17922640000000001)]),
('S',
[(1.7579990000000001, 0.056285719999999997),
(0.667022, 0.28974909999999998),
(0.105129, -1.1406529999999999)]),
('P',
[(1.7579990000000001, -0.50350240000000002),
(0.667022, 0.23450109999999999),
(0.105129, 0.91412570000000004)]),
('S', [(0.038418000000000001, 1.0)]),
('P', [(0.038418000000000001, 1.0)]),
('D',
[(20.943549999999998, 0.086727020000000002),
(5.5104860000000002, 0.38418829999999998),
(1.665038, 0.70690710000000001)]),
('D', [(0.461733, 1.0)]),
('F', [(0.80000000000000004, 1.0)])],
26: [('S',
[(61132.620000000003, 0.0017661109999999999),
(9179.3420000000006, 0.01353038),
(2090.857, 0.066731280000000004),
(589.24789999999996, 0.2314823),
(188.7543, 0.47970580000000002),
(64.446290000000005, 0.3501976)]),
('S',
[(1259.98, 0.0024380140000000001),
(299.87610000000001, 0.032240480000000002),
(96.849170000000001, 0.1265724),
(36.310200000000002, -0.03139902),
(14.72996, -0.62075930000000001),
(6.0660749999999997, -0.45029140000000001)]),
('P',
[(1259.98, 0.0040280189999999999),
(299.87610000000001, 0.031446469999999997),
(96.849170000000001, 0.1368317),
(36.310200000000002, 0.34872360000000002),
(14.72996, 0.46179310000000001),
(6.0660749999999997, 0.20430580000000001)]),
('S',
[(50.434849999999997, -0.0038732559999999998),
(16.839289999999998, 0.071965979999999999),
(7.1920859999999998, 0.25565909999999997),
(3.05342, -0.28828369999999998),
(1.2736430000000001, -0.7342822),
(0.50409099999999996, -0.20493529999999999)]),
('P',
[(50.434849999999997, -0.0070171280000000001),
(16.839289999999998, -0.028776599999999999),
(7.1920859999999998, 0.06181383),
(3.05342, 0.39549459999999997),
(1.2736430000000001, 0.49890590000000001),
(0.50409099999999996, 0.17912510000000001)]),
('S',
[(1.9503159999999999, 0.056948690000000003),
(0.73672099999999996, 0.28829149999999998),
(0.114177, -1.1381589999999999)]),
('P',
[(1.9503159999999999, -0.4593796),
(0.73672099999999996, 0.28521390000000002),
(0.114177, 0.90764849999999997)]),
('S', [(0.041147999999999997, 1.0)]),
('P', [(0.041147999999999997, 1.0)]),
('D',
[(23.149940000000001, 0.088769349999999997),
(6.1223679999999998, 0.38963189999999998),
(1.8466009999999999, 0.70148160000000004)]),
('D', [(0.50436099999999995, 1.0)]),
('F', [(0.80000000000000004, 1.0)])],
27: [('S',
[(66148.990000000005, 0.0017597870000000001),
(9933.0769999999993, 0.01348162),
(2262.8159999999998, 0.066493419999999998),
(637.91539999999998, 0.2307939),
(204.41220000000001, 0.47929189999999999),
(69.825379999999996, 0.35140969999999999)]),
('S',
[(1378.8409999999999, 0.0023762760000000001),
(328.26940000000002, 0.031674500000000001),
(106.0946, 0.12628880000000001),
(39.832749999999997, -0.02584552),
(16.186219999999999, -0.61834909999999998),
(6.6677879999999998, -0.45670080000000002)]),
('P',
[(1378.8409999999999, 0.0039714879999999996),
(328.26940000000002, 0.03108174),
(106.0946, 0.1357439),
(39.832749999999997, 0.34768270000000001),
(16.186219999999999, 0.46263399999999999),
(6.6677879999999998, 0.20516319999999999)]),
('S',
[(54.52355, -0.0039930039999999997),
(18.297830000000001, 0.074096629999999997),
(7.8673479999999998, 0.25419999999999998),
(3.3405339999999999, -0.29216569999999997),
(1.393756, -0.73187029999999997),
(0.55132599999999998, -0.20407839999999999)]),
('P',
[(54.52355, -0.0072907720000000001),
(18.297830000000001, -0.029260270000000001),
(7.8673479999999998, 0.065641500000000005),
(3.3405339999999999, 0.40006520000000001),
(1.393756, 0.49502360000000001),
(0.55132599999999998, 0.17582400000000001)]),
('S',
[(2.1519469999999998, 0.053798430000000001),
(0.81106299999999998, 0.2759971),
(0.121017, -1.1296919999999999)]),
('P',
[(2.1519469999999998, -0.21654960000000001),
(0.81106299999999998, 0.1240488),
(0.121017, 0.9724064)]),
('S', [(0.043036999999999999, 1.0)]),
('P', [(0.043036999999999999, 1.0)]),
('D',
[(25.593060000000001, 0.090047479999999999),
(6.8009899999999996, 0.39317029999999997),
(2.051647, 0.69768439999999998)]),
('D', [(0.55567100000000003, 1.0)]),
('F', [(0.80000000000000004, 1.0)])],
28: [('S',
[(71396.350000000006, 0.0017530029999999999),
(10720.84, 0.013431220000000001),
(2442.1289999999999, 0.066270410000000002),
(688.42650000000003, 0.23025080000000001),
(220.61529999999999, 0.47901860000000002),
(75.393730000000005, 0.3523444)]),
('S',
[(1492.5319999999999, 0.0023707139999999999),
(355.40129999999999, 0.031605660000000001),
(114.9534, 0.12663350000000001),
(43.22043, -0.02417037),
(17.597100000000001, -0.61877749999999998),
(7.257765, -0.457677)]),
('P',
[(1492.5319999999999, 0.0039675539999999999),
(355.40129999999999, 0.031094790000000001),
(114.9534, 0.13595170000000001),
(43.22043, 0.34851359999999998),
(17.597100000000001, 0.46254980000000001),
(7.257765, 0.20351859999999999)]),
('S',
[(59.352609999999999, -0.0041620019999999997),
(20.021809999999999, 0.074251109999999995),
(8.6145610000000001, 0.25413599999999997),
(3.6605310000000002, -0.29034769999999999),
(1.528111, -0.73021210000000003),
(0.60405699999999996, -0.2076057)]),
('P',
[(59.352609999999999, -0.0074214520000000003),
(20.021809999999999, -0.029534100000000001),
(8.6145610000000001, 0.067318520000000007),
(3.6605310000000002, 0.40166600000000002),
(1.528111, 0.4926623),
(0.60405699999999996, 0.17568929999999999)]),
('S',
[(2.3792759999999999, 0.051578880000000001),
(0.88583900000000004, 0.27076109999999998),
(0.128529, -1.12477)]),
('P',
[(2.3792759999999999, -0.1887663),
(0.88583900000000004, 0.1015199),
(0.128529, 0.97909060000000003)]),
('S', [(0.045194999999999999, 1.0)]),
('P', [(0.045194999999999999, 1.0)]),
('D',
[(28.191469999999999, 0.090988810000000003),
(7.5235839999999996, 0.39582079999999997),
(2.2712279999999998, 0.69471539999999998)]),
('D', [(0.61160300000000001, 1.0)]),
('F', [(0.80000000000000004, 1.0)])],
29: [('S',
[(76794.380000000005, 0.001748161),
(11530.700000000001, 0.01339602),
(2626.5749999999998, 0.066108849999999997),
(740.49030000000005, 0.22982649999999999),
(237.3528, 0.47876750000000001),
(81.158180000000002, 0.3530739)]),
('S',
[(1610.8140000000001, 0.0023640549999999999),
(383.63670000000002, 0.031536349999999998),
(124.1733, 0.12694520000000001),
(46.746780000000001, -0.0226284),
(19.06569, -0.61920799999999998),
(7.8715669999999998, -0.45853929999999998)]),
('P',
[(1610.8140000000001, 0.0039633070000000001),
(383.63670000000002, 0.031102230000000002),
(124.1733, 0.13613500000000001),
(46.746780000000001, 0.34929139999999997),
(19.06569, 0.462478),
(7.8715669999999998, 0.2020102)]),
('S',
[(64.457319999999996, -0.0043310750000000002),
(21.852119999999999, 0.074123069999999999),
(9.4053430000000002, 0.25421080000000001),
(3.9991680000000001, -0.28748430000000003),
(1.6702969999999999, -0.7291436),
(0.65962699999999996, -0.2113951)]),
('P',
[(64.457319999999996, -0.0075237250000000002),
(21.852119999999999, -0.029756870000000001),
(9.4053430000000002, 0.068496539999999995),
(3.9991680000000001, 0.40271410000000002),
(1.6702969999999999, 0.49084899999999998),
(0.65962699999999996, 0.17592679999999999)]),
('S',
[(2.600088, 0.050275769999999997),
(0.96309400000000001, 0.26500400000000002),
(0.136161, -1.120155)]),
('P',
[(2.600088, -0.1702911),
(0.96309400000000001, 0.093101329999999996),
(0.136161, 0.98143360000000002)]),
('S', [(0.047331999999999999, 1.0)]),
('P', [(0.047331999999999999, 1.0)]),
('D',
[(30.85341, 0.091999049999999999),
(8.2649849999999994, 0.39850210000000003),
(2.4953319999999999, 0.69178969999999995)]),
('D', [(0.66765799999999997, 1.0)]),
('F', [(0.80000000000000004, 1.0)])],
30: [('S',
[(82400.940000000002, 0.0017433290000000001),
(12372.549999999999, 0.013359660000000001),
(2818.3510000000001, 0.065943650000000006),
(794.57169999999996, 0.22941510000000001),
(254.72319999999999, 0.47854530000000001),
(87.138800000000003, 0.35377530000000001)]),
('S',
[(1732.569, 0.0023614590000000002),
(412.7149, 0.031501769999999998),
(133.678, 0.12727740000000001),
(50.385849999999998, -0.021459280000000001),
(20.583580000000001, -0.61976520000000002),
(8.5059400000000007, -0.45901799999999998)]),
('P',
[(1732.569, 0.0039631249999999996),
(412.7149, 0.03113411),
(133.678, 0.13639309999999999),
(50.385849999999998, 0.35012660000000001),
(20.583580000000001, 0.4623179),
(8.5059400000000007, 0.2004995)]),
('S',
[(69.364919999999998, -0.004440098),
(23.620819999999998, 0.075052530000000006),
(10.184710000000001, 0.25331110000000001),
(4.3340820000000004, -0.28818969999999999),
(1.810918, -0.72670520000000005),
(0.71484099999999995, -0.2133439)]),
('P',
[(69.364919999999998, -0.0076892619999999997),
(23.620819999999998, -0.029979820000000001),
(10.184710000000001, 0.070824109999999996),
(4.3340820000000004, 0.40461409999999998),
(1.810918, 0.48823250000000001),
(0.71484099999999995, 0.17519699999999999)]),
('S',
[(2.823842, 0.048985430000000003),
(1.0395430000000001, 0.25927929999999999),
(0.143264, -1.1157109999999999)]),
('P',
[(2.823842, -0.15867629999999999),
(1.0395430000000001, 0.083793270000000003),
(0.143264, 0.98405469999999995)]),
('S', [(0.049296, 1.0)]),
('P', [(0.049296, 1.0)]),
('D',
[(33.707639999999998, 0.092626479999999997),
(9.0611060000000005, 0.40029799999999999),
(2.7383829999999998, 0.68966079999999996)]),
('D', [(0.730294, 1.0)]),
('F', [(0.80000000000000004, 1.0)])]}
g631 = \
{1: [('S',
[(18.731137, 0.033494599999999999),
(2.8253936999999998, 0.23472694999999999),
(0.64012170000000002, 0.81375732999999995)]),
('S', [(0.1612778, 1.0)])],
2: [('S',
[(38.421633999999997, 0.023765999999999999),
(5.7780300000000002, 0.15467900000000001),
(1.2417739999999999, 0.46962999999999999)]),
('S', [(0.29796400000000001, 1.0)])],
3: [('S',
[(642.41891999999996, 0.0021426000000000001),
(96.798514999999995, 0.016208899999999998),
(22.091121000000001, 0.077315599999999998),
(6.2010702999999996, 0.245786),
(1.9351176999999999, 0.47018900000000002),
(0.63673579999999996, 0.34547080000000002)]),
('S',
[(2.3249184000000001, -0.035091700000000003),
(0.63243059999999995, -0.19123280000000001),
(0.079053399999999996, 1.0839878000000001)]),
('P',
[(2.3249184000000001, 0.0089414999999999998),
(0.63243059999999995, 0.14100950000000001),
(0.079053399999999996, 0.94536370000000003)]),
('S', [(0.035962000000000001, 1.0)]),
('P', [(0.035962000000000001, 1.0)])],
4: [('S',
[(1264.5857000000001, 0.0019448),
(189.93681000000001, 0.0148351),
(43.159089000000002, 0.072090600000000005),
(12.098663, 0.23715420000000001),
(3.8063232, 0.46919870000000002),
(1.2728903, 0.35652020000000001)]),
('S',
[(3.1964630999999999, -0.1126487),
(0.74781330000000001, -0.2295064),
(0.2199663, 1.1869167)]),
('P',
[(3.1964630999999999, 0.055980200000000001),
(0.74781330000000001, 0.26155060000000002),
(0.2199663, 0.79397229999999996)]),
('S', [(0.082309900000000005, 1.0)]),
('P', [(0.082309900000000005, 1.0)])],
5: [('S',
[(2068.8823000000002, 0.0018663),
(310.64956999999998, 0.0142515),
(70.683032999999995, 0.069551600000000005),
(19.861080000000001, 0.2325729),
(6.2993047999999998, 0.46707870000000001),
(2.127027, 0.36343140000000002)]),
('S',
[(4.7279710000000001, -0.1303938),
(1.1903376999999999, -0.13078890000000001),
(0.3594117, 1.1309444)]),
('P',
[(4.7279710000000001, 0.0745976),
(1.1903376999999999, 0.30784669999999997),
(0.3594117, 0.74345680000000003)]),
('S', [(0.12675120000000001, 1.0)]),
('P', [(0.12675120000000001, 1.0)])],
6: [('S',
[(3047.5248999999999, 0.0018347000000000001),
(457.36950999999999, 0.014037300000000001),
(103.94869, 0.068842600000000004),
(29.210155, 0.23218440000000001),
(9.2866630000000008, 0.4679413),
(3.1639270000000002, 0.36231200000000002)]),
('S',
[(7.8682724000000004, -0.11933240000000001),
(1.8812884999999999, -0.1608542),
(0.54424930000000005, 1.1434564)]),
('P',
[(7.8682724000000004, 0.068999099999999994),
(1.8812884999999999, 0.31642399999999998),
(0.54424930000000005, 0.74430830000000003)]),
('S', [(0.16871439999999999, 1.0)]),
('P', [(0.16871439999999999, 1.0)])],
7: [('S',
[(4173.5110000000004, 0.0018347999999999999),
(627.4579, 0.013995),
(142.90209999999999, 0.068586999999999995),
(40.23433, 0.232241),
(12.820209999999999, 0.46906999999999999),
(4.3904370000000004, 0.36045500000000003)]),
('S',
[(11.626358, -0.11496099999999999),
(2.7162799999999998, -0.16911799999999999),
(0.77221799999999996, 1.1458520000000001)]),
('P',
[(11.626358, 0.067580000000000001),
(2.7162799999999998, 0.323907),
(0.77221799999999996, 0.74089499999999997)]),
('S', [(0.21203130000000001, 1.0)]),
('P', [(0.21203130000000001, 1.0)])],
8: [('S',
[(5484.6716999999999, 0.0018311),
(825.23495000000003, 0.0139501),
(188.04696000000001, 0.068445099999999995),
(52.964500000000001, 0.23271430000000001),
(16.897570000000002, 0.47019300000000003),
(5.7996353000000003, 0.35852089999999998)]),
('S',
[(15.539616000000001, -0.1107775),
(3.5999336, -0.1480263),
(1.0137617999999999, 1.1307670000000001)]),
('P',
[(15.539616000000001, 0.070874300000000001),
(3.5999336, 0.33975280000000002),
(1.0137617999999999, 0.72715859999999999)]),
('S', [(0.27000580000000002, 1.0)]),
('P', [(0.27000580000000002, 1.0)])],
9: [('S',
[(7001.7130900000002, 0.0018196168999999999),
(1051.36609, 0.013916079600000001),
(239.28568999999999, 0.068405324500000003),
(67.397445300000001, 0.23318575999999999),
(21.519957300000002, 0.47126743900000001),
(7.4031013000000003, 0.35661854599999998)]),
('S',
[(20.847952800000002, -0.10850697500000001),
(4.80830834, -0.14645165800000001),
(1.3440698600000001, 1.1286885799999999)]),
('P',
[(20.847952800000002, 0.071628724300000002),
(4.80830834, 0.34591210300000003),
(1.3440698600000001, 0.72246995700000005)]),
('S', [(0.35815139299999998, 1.0)]),
('P', [(0.35815139299999998, 1.0)])],
10: [('S',
[(8425.8515299999999, 0.0018843480999999999),
(1268.5193999999999, 0.0143368994),
(289.62141400000002, 0.070109623300000007),
(81.859003999999999, 0.237373266),
(26.2515079, 0.473007126),
(9.0947205100000001, 0.34840124099999997)]),
('S',
[(26.532131, -0.10711828700000001),
(6.1017550099999998, -0.146163821),
(1.69627153, 1.1277735)]),
('P',
[(26.532131, 0.071909588499999996),
(6.1017550099999998, 0.34951337199999999),
(1.69627153, 0.71994051199999998)]),
('S', [(0.44581870000000001, 1.0)]),
('P', [(0.44581870000000001, 1.0)])],
11: [('S',
[(9993.2000000000007, 0.0019377000000000001),
(1499.8900000000001, | |
. is_exact_match ( address ) ) : return ( OooOoOoo0OOoo )
if 32 - 32: OoooooooOO . I1Ii111 - I1ii11iIi11i
return ( None )
if 29 - 29: OoO0O00
if 33 - 33: I1ii11iIi11i - O0
if 72 - 72: Oo0Ooo * iII111i - I11i
if 81 - 81: I1Ii111
if 85 - 85: O0 % OoOoOO00 . I1ii11iIi11i
if 46 - 46: OOooOOo * iIii1I11I1II1
if 33 - 33: OoO0O00 * II111iiii / i1IIi
def lisp_get_any_map_server():
    """Return an arbitrary configured map-server, or None if none exist.

    Yields whichever entry is encountered first in the global
    lisp_map_servers_list dictionary.
    """
    for map_server in lisp_map_servers_list.values():
        return map_server
    return None
if 93 - 93: I1Ii111 % I11i
if 64 - 64: I1IiiI % OoOoOO00 / Oo0Ooo
if 40 - 40: Ii1I + iIii1I11I1II1 / oO0o . II111iiii % O0 - IiII
if 49 - 49: IiII - OOooOOo * OOooOOo . O0
if 60 - 60: OoOoOO00 % iIii1I11I1II1 + IiII % o0oOOo0O0Ooo
if 64 - 64: OoOoOO00 * I1ii11iIi11i . OoooooooOO . i1IIi
if 61 - 61: OoO0O00
if 100 - 100: OoOoOO00
if 97 - 97: OoooooooOO
if 91 - 91: o0oOOo0O0Ooo / O0 % OoO0O00
def lisp_get_map_resolver(address, eid):
    """Select a map-resolver for the given address or EID.

    When *address* is supplied, return the map-resolver whose list key
    contains the printed address (last match wins), or None.

    Otherwise select by *eid*: "" matches any resolver, None selects the
    "all" group, and any other EID is looked up in the database cache to
    obtain its configured mr-name. Among resolvers of the selected group,
    the least recently used one is returned (None if no match).
    """
    if (address != None):
        addr_str = address.print_address()
        found = None
        for key in lisp_map_resolvers_list:
            if (key.find(addr_str) == -1): continue
            found = lisp_map_resolvers_list[key]
        return (found)

    #
    # Decide which mr-name group to search, based on the EID argument.
    #
    if (eid == ""):
        mr_name = ""
    elif (eid == None):
        mr_name = "all"
    else:
        db = lisp_db_for_lookups.lookup_cache(eid, False)
        mr_name = "all" if db == None else db.use_mr_name

    #
    # Pick the least-recently-used resolver within the selected group.
    #
    oldest = None
    for mr in lisp_map_resolvers_list.values():
        if (mr_name == ""): return (mr)
        if (mr.mr_name != mr_name): continue
        if (oldest == None or mr.last_used < oldest.last_used): oldest = mr
    return (oldest)
if 13 - 13: OoooooooOO
if 29 - 29: I1Ii111 + OOooOOo . OoooooooOO . II111iiii + OoO0O00 / OoooooooOO
if 61 - 61: ooOoO0o
if 4 - 4: Oo0Ooo + oO0o + oO0o
if 79 - 79: OoooooooOO
if 98 - 98: O0 . ooOoO0o * I1Ii111
if 98 - 98: ooOoO0o + o0oOOo0O0Ooo / I11i - Ii1I * II111iiii + i1IIi
if 10 - 10: oO0o
def lisp_get_decent_map_resolver(eid):
    """Return the LISP-Decent map-resolver responsible for *eid*.

    Maps the EID to an index via lisp_get_decent_index(), builds the DNS
    name "<index>.<lisp_decent_dns_suffix>", and returns the least
    recently used configured map-resolver carrying that dns_name
    (None when no resolver matches).
    """
    index = lisp_get_decent_index(eid)
    dns_name = str(index) + "." + lisp_decent_dns_suffix

    lprint("Use LISP-Decent map-resolver {} for EID {}".format(
        bold(dns_name, False), eid.print_prefix()))

    #
    # Among resolvers with the computed DNS name, prefer the one used
    # least recently.
    #
    oldest = None
    for mr in lisp_map_resolvers_list.values():
        if (dns_name != mr.dns_name): continue
        if (oldest == None or mr.last_used < oldest.last_used): oldest = mr
    return (oldest)
if 20 - 20: o0oOOo0O0Ooo . I1Ii111 + O0
if 99 - 99: O0 / IiII . oO0o
if 18 - 18: OoooooooOO * OoO0O00 * I1Ii111
if 12 - 12: i11iIiiIii / iIii1I11I1II1 . I11i % I1Ii111 * ooOoO0o % ooOoO0o
if 13 - 13: i1IIi . ooOoO0o . ooOoO0o
if 24 - 24: iIii1I11I1II1
if 72 - 72: i11iIiiIii + o0oOOo0O0Ooo % ooOoO0o * I1ii11iIi11i . i1IIi
def lisp_ipv4_input(packet):
    """Validate and prepare an inner IPv4 packet for LISP forwarding.

    Returns a two-element list [done, packet]:
      [True, packet]  - protocol field is 2 (IGMP); caller handles it as-is.
      [False, None]   - checksum or TTL check failed; packet is dropped.
      [False, packet] - packet accepted; TTL decremented and the header
                        checksum recomputed.

    Assumes *packet* is a byte-string beginning at the IPv4 header
    (Python-2 str semantics: single-byte indexing, hence ord()).
    """

    #
    # Pass IGMP packets (IP protocol number 2, header offset 9) through
    # untouched.
    #
    if (ord(packet[9]) == 2): return ([True, packet])

    #
    # Verify the IPv4 header checksum, unless the sender supplied 0.
    # Running lisp_ip_checksum() over a valid header yields 0 in the
    # checksum field.
    #
    checksum = struct.unpack("H", packet[10:12])[0]
    if (checksum == 0):
        dprint("Packet arrived with checksum of 0!")
    else:
        packet = lisp_ip_checksum(packet)
        checksum = struct.unpack("H", packet[10:12])[0]
        if (checksum != 0):
            dprint("IPv4 header checksum failed for inner header")
            packet = lisp_format_packet(packet[0:20])
            dprint("Packet header: {}".format(packet))
            return ([False, None])

    #
    # TTL processing: drop packets arriving with ttl 0 or about to expire
    # with ttl 1.
    #
    ttl = struct.unpack("B", packet[8:9])[0]
    if (ttl == 0):
        dprint("IPv4 packet arrived with ttl 0, packet discarded")
        return ([False, None])
    elif (ttl == 1):
        dprint("IPv4 packet {}, packet discarded".format(bold("ttl expiry", False)))
        return ([False, None])

    #
    # Decrement TTL, zero the checksum field, and recompute the checksum.
    #
    ttl -= 1
    packet = packet[0:8] + struct.pack("B", ttl) + packet[9::]
    packet = packet[0:10] + struct.pack("H", 0) + packet[12::]
    packet = lisp_ip_checksum(packet)
    return ([False, packet])
if 91 - 91: Oo0Ooo . I11i . I1ii11iIi11i
if 60 - 60: i11iIiiIii - OOooOOo
if 78 - 78: I1IiiI * ooOoO0o % iIii1I11I1II1 / I1ii11iIi11i
if 61 - 61: I1Ii111 . Ii1I + OoooooooOO
if 98 - 98: OOooOOo . ooOoO0o . OoOoOO00 - I1Ii111 . i1IIi - iIii1I11I1II1
if 89 - 89: II111iiii * I1ii11iIi11i - I1IiiI
if 58 - 58: Ii1I / Oo0Ooo % IiII
def lisp_ipv6_input ( packet ) :
oO0o0 = packet . inner_dest
packet = packet . packet
if 33 - 33: II111iiii . OOooOOo % iIii1I11I1II1 - Oo0Ooo - OoOoOO00 % i11iIiiIii
if 60 - 60: iII111i . o0oOOo0O0Ooo
if 56 - 56: I1ii11iIi11i
if 89 - 89: Oo0Ooo + I1ii11iIi11i * o0oOOo0O0Ooo | |
<filename>tests/integration/test_s3.py
import io
import os
import ssl
import boto3
import gzip
import json
import time
import uuid
import unittest
import datetime
import requests
from io import BytesIO
from pytz import timezone
from urllib.parse import parse_qs, quote
from botocore.exceptions import ClientError
from six.moves.urllib import parse as urlparse
from six.moves.urllib.request import Request, urlopen
from localstack import config, constants
from botocore.client import Config
from localstack.utils import testutil
from localstack.constants import TEST_AWS_ACCESS_KEY_ID, TEST_AWS_SECRET_ACCESS_KEY, S3_VIRTUAL_HOSTNAME
from localstack.utils.aws import aws_stack
from localstack.services.s3 import s3_listener, s3_utils
from localstack.utils.common import (
short_uid, retry, get_service_protocol, to_bytes, safe_requests, to_str, new_tmp_file, rm_rf, load_file)
from localstack.services.awslambda.lambda_utils import LAMBDA_RUNTIME_PYTHON36
TEST_BUCKET_NAME_WITH_POLICY = 'test-bucket-policy-1'
TEST_QUEUE_FOR_BUCKET_WITH_NOTIFICATION = 'test_queue_for_bucket_notification_1'
TEST_BUCKET_WITH_VERSIONING = 'test-bucket-versioning-1'
TEST_BUCKET_NAME_2 = 'test-bucket-2'
TEST_KEY_2 = 'test-key-2'
TEST_GET_OBJECT_RANGE = 17
THIS_FOLDER = os.path.dirname(os.path.realpath(__file__))
TEST_LAMBDA_PYTHON_ECHO = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_triggered_by_s3.py')
TEST_LAMBDA_PYTHON_DOWNLOAD_FROM_S3 = os.path.join(THIS_FOLDER, 'lambdas',
'lambda_triggered_by_sqs_download_s3_file.py')
BATCH_DELETE_BODY = """
<Delete xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Object>
<Key>%s</Key>
</Object>
<Object>
<Key>%s</Key>
</Object>
</Delete>
"""
class PutRequest(Request):
    """urllib Request subclass that always issues an HTTP PUT.

    urllib's Request derives the method from the presence of a body;
    overriding get_method forces PUT regardless, which the presigned-URL
    upload tests rely on.

    The previous no-op __init__ (which returned the result of
    Request.__init__) added nothing and is removed; Request's own
    constructor is used directly.
    """

    def get_method(self, *args, **kwargs):
        # Arguments are accepted for signature compatibility but ignored:
        # the method is unconditionally PUT.
        return 'PUT'
# def test_host_and_path_addressing(wrapped):
# """ Decorator that runs a test method with both - path and host style addressing. """
# # TODO - needs to be fixed below!
# def wrapper(self):
# try:
# # test via path based addressing
# TestS3.OVERWRITTEN_CLIENT = aws_stack.connect_to_service('s3', config={'addressing_style': 'virtual'})
# wrapped()
# # test via host based addressing
# TestS3.OVERWRITTEN_CLIENT = aws_stack.connect_to_service('s3', config={'addressing_style': 'path'})
# wrapped()
# finally:
# # reset client
# TestS3.OVERWRITTEN_CLIENT = None
# return
class TestS3(unittest.TestCase):
    # When set, the s3_client property returns this client instead of the
    # per-test default (hook for testing alternate addressing styles; see
    # the commented-out test_host_and_path_addressing decorator above).
    OVERWRITTEN_CLIENT = None
    def setUp(self):
        # Fresh clients per test; the S3 client is stored privately so the
        # s3_client property can substitute OVERWRITTEN_CLIENT when set.
        self._s3_client = aws_stack.connect_to_service('s3')
        self.sqs_client = aws_stack.connect_to_service('sqs')
    @property
    def s3_client(self):
        # Prefer a class-level override (used to switch addressing styles)
        # over the default client created in setUp.
        return TestS3.OVERWRITTEN_CLIENT or self._s3_client
    def test_create_bucket_via_host_name(self):
        """Create a bucket addressed via the Host header (virtual-host style)
        and verify the location constraint is accepted and queryable."""
        body = """<?xml version="1.0" encoding="UTF-8"?>
            <CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
                <LocationConstraint>eu-central-1</LocationConstraint>
            </CreateBucketConfiguration>"""
        headers = aws_stack.mock_aws_request_headers('s3')
        bucket_name = 'test-%s' % short_uid()
        # the bucket is named only in the Host header, not the URL path
        headers['Host'] = s3_utils.get_bucket_hostname(bucket_name)
        response = requests.put(config.TEST_S3_URL, data=body, headers=headers, verify=False)
        self.assertEquals(response.status_code, 200)
        response = self.s3_client.get_bucket_location(Bucket=bucket_name)
        self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
        self.assertIn('LocationConstraint', response)
# @test_host_and_path_addressing
    def test_bucket_policy(self):
        """Put a bucket policy and verify it round-trips unchanged."""
        # create test bucket
        self.s3_client.create_bucket(Bucket=TEST_BUCKET_NAME_WITH_POLICY)

        # put bucket policy
        policy = {
            'Version': '2012-10-17',
            'Statement': {
                'Action': ['s3:GetObject'],
                'Effect': 'Allow',
                'Resource': 'arn:aws:s3:::bucketName/*',
                'Principal': {
                    'AWS': ['*']
                }
            }
        }
        response = self.s3_client.put_bucket_policy(
            Bucket=TEST_BUCKET_NAME_WITH_POLICY,
            Policy=json.dumps(policy)
        )
        # a successful policy put returns 204 No Content
        self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 204)

        # retrieve and check policy config
        saved_policy = self.s3_client.get_bucket_policy(Bucket=TEST_BUCKET_NAME_WITH_POLICY)['Policy']
        self.assertEqual(json.loads(saved_policy), policy)
    def test_s3_put_object_notification(self):
        """Verify that object PUTs — both path-style and host-style addressed —
        trigger SQS bucket notifications that carry the object versionId."""
        bucket_name = 'notif-%s' % short_uid()
        key_by_path = 'key-by-hostname'
        key_by_host = 'key-by-host'
        queue_url, queue_attributes = self._create_test_queue()
        self._create_test_notification_bucket(queue_attributes, bucket_name=bucket_name)
        self.s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': 'Enabled'})

        # put an object where the bucket_name is in the path
        obj = self.s3_client.put_object(Bucket=bucket_name, Key=key_by_path, Body='something')

        # put an object where the bucket_name is in the host
        headers = aws_stack.mock_aws_request_headers('s3')
        headers['Host'] = s3_utils.get_bucket_hostname(bucket_name)
        url = '{}/{}'.format(config.TEST_S3_URL, key_by_host)
        # verify=False must be set as this test fails on travis because of an SSL error non-existent locally
        response = requests.put(url, data='something else', headers=headers, verify=False)
        self.assertTrue(response.ok)

        # one notification per PUT is expected
        self.assertEqual(self._get_test_queue_message_count(queue_url), '2')

        response = self.sqs_client.receive_message(QueueUrl=queue_url)
        messages = [json.loads(to_str(m['Body'])) for m in response['Messages']]
        record = messages[0]['Records'][0]
        self.assertIsNotNone(record['s3']['object']['versionId'])
        self.assertEquals(record['s3']['object']['versionId'], obj['VersionId'])

        # clean up
        self.s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': 'Disabled'})
        self.sqs_client.delete_queue(QueueUrl=queue_url)
        self._delete_bucket(bucket_name, [key_by_path, key_by_host])
    def test_s3_upload_fileobj_with_large_file_notification(self):
        """Upload a file large enough to force a multipart upload and verify
        the notification event name and the size of the downloaded copy."""
        bucket_name = 'notif-large-%s' % short_uid()
        queue_url, queue_attributes = self._create_test_queue()
        self._create_test_notification_bucket(queue_attributes, bucket_name=bucket_name)

        # has to be larger than 64MB to be broken up into a multipart upload
        file_size = 75000000
        large_file = self.generate_large_file(file_size)
        download_file = new_tmp_file()
        try:
            self.s3_client.upload_file(Bucket=bucket_name, Key=large_file.name, Filename=large_file.name)
            self.assertEqual(self._get_test_queue_message_count(queue_url), '1')

            # ensure that the first message's eventName is ObjectCreated:CompleteMultipartUpload
            messages = self.sqs_client.receive_message(QueueUrl=queue_url, AttributeNames=['All'])
            message = json.loads(messages['Messages'][0]['Body'])
            self.assertEqual(message['Records'][0]['eventName'], 'ObjectCreated:CompleteMultipartUpload')

            # download the file, check file size
            self.s3_client.download_file(Bucket=bucket_name, Key=large_file.name, Filename=download_file)
            self.assertEqual(os.path.getsize(download_file), file_size)

            # clean up
            self.sqs_client.delete_queue(QueueUrl=queue_url)
            self._delete_bucket(bucket_name, large_file.name)
        finally:
            # clean up large files
            large_file.close()
            rm_rf(large_file.name)
            rm_rf(download_file)
    def test_s3_multipart_upload_with_small_single_part(self):
        """A multipart upload whose only (i.e. last) part is under 5MB must
        still succeed and emit exactly one notification."""
        # In a multipart upload "Each part must be at least 5 MB in size, except the last part."
        # https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html
        bucket_name = 'notif-large-%s' % short_uid()
        key_by_path = 'key-by-hostname'
        queue_url, queue_attributes = self._create_test_queue()
        self._create_test_notification_bucket(queue_attributes, bucket_name=bucket_name)

        # perform upload
        self._perform_multipart_upload(bucket=bucket_name, key=key_by_path, zip=True)
        self.assertEqual(self._get_test_queue_message_count(queue_url), '1')

        # clean up
        self.sqs_client.delete_queue(QueueUrl=queue_url)
        self._delete_bucket(bucket_name, [key_by_path])
def test_invalid_range_error(self):
bucket_name = 'range-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
self.s3_client.create_bucket(Bucket=bucket_name)
self.s3_client.put_object(Bucket=bucket_name, Key='steve', Body=b'is awesome')
try:
self.s3_client.get_object(Bucket=bucket_name, Key='steve', Range='bytes=1024-4096')
except ClientError as e:
self.assertEqual(e.response['Error']['Code'], 'InvalidRange')
# clean up
self._delete_bucket(bucket_name, ['steve'])
def test_range_key_not_exists(self):
bucket_name = 'range-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
self.s3_client.create_bucket(Bucket=bucket_name)
with self.assertRaises(ClientError) as ctx:
self.s3_client.get_object(Bucket=bucket_name, Key='key', Range='bytes=1024-4096')
self.assertIn('NoSuchKey', str(ctx.exception))
# clean up
self._delete_bucket(bucket_name)
    def test_upload_key_with_hash_prefix(self):
        """Keys beginning with '#' must upload and download intact, and must
        be gone after bucket cleanup."""
        bucket_name = 'hash-%s' % short_uid()
        self.s3_client.create_bucket(Bucket=bucket_name)
        key_name = '#key-with-hash-prefix'
        content = b'test 123'
        self.s3_client.put_object(Bucket=bucket_name, Key=key_name, Body=content)

        downloaded_object = self.s3_client.get_object(Bucket=bucket_name, Key=key_name)
        downloaded_content = to_str(downloaded_object['Body'].read())
        self.assertEqual(to_str(downloaded_content), to_str(content))

        # clean up
        self._delete_bucket(bucket_name, [key_name])

        # after cleanup the key must no longer resolve
        with self.assertRaises(Exception):
            self.s3_client.head_object(Bucket=bucket_name, Key=key_name)
    def test_s3_multipart_upload_acls(self):
        """ACLs supplied to regular and multipart uploads must be reflected
        in the AllUsers grants of the resulting objects."""
        bucket_name = 'test-bucket-%s' % short_uid()
        self.s3_client.create_bucket(Bucket=bucket_name, ACL='public-read')

        def check_permissions(key, expected_perms):
            # count the permissions granted to the AllUsers group only
            grants = self.s3_client.get_object_acl(Bucket=bucket_name, Key=key)['Grants']
            grants = [g for g in grants if 'AllUsers' in g.get('Grantee', {}).get('URI', '')]
            self.assertEquals(len(grants), 1)
            permissions = grants[0]['Permission']
            permissions = permissions if isinstance(permissions, list) else [permissions]
            self.assertEquals(len(permissions), expected_perms)

        # perform uploads (multipart and regular) and check ACLs
        self.s3_client.put_object(Bucket=bucket_name, Key='acl-key0', Body='something')
        check_permissions('acl-key0', 1)
        self._perform_multipart_upload(bucket=bucket_name, key='acl-key1')
        check_permissions('acl-key1', 1)
        self._perform_multipart_upload(bucket=bucket_name, key='acl-key2', acl='public-read-write')
        check_permissions('acl-key2', 2)
    def test_s3_presigned_url_upload(self):
        """Uploading via a presigned URL must trigger a bucket notification."""
        key_by_path = 'key-by-hostname'
        bucket_name = 'notif-large-%s' % short_uid()
        queue_url, queue_attributes = self._create_test_queue()
        self._create_test_notification_bucket(queue_attributes, bucket_name=bucket_name)

        self._perform_presigned_url_upload(bucket=bucket_name, key=key_by_path)
        self.assertEqual(self._get_test_queue_message_count(queue_url), '1')

        # clean up
        self.sqs_client.delete_queue(QueueUrl=queue_url)
        self._delete_bucket(bucket_name, [key_by_path])
    def test_s3_get_response_default_content_type(self):
        """Objects PUT without a content type must be served back with
        'binary/octet-stream'."""
        # When no content type is provided by a PUT request
        # 'binary/octet-stream' should be used
        # src: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
        bucket_name = 'test-bucket-%s' % short_uid()
        client = self._get_test_client()
        client.create_bucket(Bucket=bucket_name)

        # put object
        object_key = 'key-by-hostname'
        client.put_object(Bucket=bucket_name, Key=object_key, Body='something')
        url = client.generate_presigned_url(
            'get_object', Params={'Bucket': bucket_name, 'Key': object_key})

        # get object and assert headers
        response = requests.get(url, verify=False)
        self.assertEqual(response.headers['content-type'], 'binary/octet-stream')

        # clean up
        self._delete_bucket(bucket_name, [object_key])
    def test_s3_put_presigned_url_metadata(self):
        """Metadata passed when generating a presigned PUT URL must be stored
        with the object; the PUT response body must be empty."""
        # Object metadata should be passed as query params via presigned URL
        # https://github.com/localstack/localstack/issues/544
        bucket_name = 'test-bucket-%s' % short_uid()
        client = self._get_test_client()
        client.create_bucket(Bucket=bucket_name)

        metadata = {
            'foo': 'bar'
        }

        # put object
        object_key = 'key-by-hostname'
        url = client.generate_presigned_url(
            'put_object', Params={'Bucket': bucket_name, 'Key': object_key, 'Metadata': metadata})
        # append metadata manually to URL (this is not easily possible with boto3, as "Metadata" cannot
        # be passed to generate_presigned_url, and generate_presigned_post works differently)

        # get object and assert metadata is present
        response = requests.put(url, data='content 123', verify=False)
        self.assertLess(response.status_code, 400)
        # response body should be empty, see https://github.com/localstack/localstack/issues/1317
        self.assertEqual('', to_str(response.content))

        response = client.head_object(Bucket=bucket_name, Key=object_key)
        self.assertEquals('bar', response.get('Metadata', {}).get('foo'))

        # clean up
        self._delete_bucket(bucket_name, [object_key])
    def test_s3_put_metadata_underscores(self):
        """Metadata keys containing underscores must round-trip unchanged."""
        # Object metadata keys should accept keys with underscores
        # https://github.com/localstack/localstack/issues/1790
        bucket_name = 'test-%s' % short_uid()
        self.s3_client.create_bucket(Bucket=bucket_name)

        # put object
        object_key = 'key-with-metadata'
        metadata = {'test_meta_1': 'foo', '__meta_2': 'bar'}
        self.s3_client.put_object(Bucket=bucket_name, Key=object_key, Metadata=metadata, Body='foo')
        metadata_saved = self.s3_client.head_object(Bucket=bucket_name, Key=object_key)['Metadata']
        self.assertEqual(metadata, metadata_saved)

        # clean up
        self._delete_bucket(bucket_name, [object_key])
    def test_s3_object_expiry(self):
        """Objects with an Expires timestamp in the past must not be
        retrievable; future-dated ones must be, with Expires echoed back."""
        # handle s3 object expiry
        # https://github.com/localstack/localstack/issues/1685
        bucket_name = 'test-%s' % short_uid()
        self.s3_client.create_bucket(Bucket=bucket_name)

        # put object
        object_key = 'key-with-metadata'
        metadata = {'test_meta_1': 'foo', '__meta_2': 'bar'}
        self.s3_client.put_object(Bucket=bucket_name, Key=object_key, Metadata=metadata, Body='foo',
                                  Expires=datetime.datetime.now(timezone('GMT')) - datetime.timedelta(hours=1))
        # try to fetch an object which is already expired
        # NOTE(review): lookup uses object_key.lower(); harmless here since
        # the key is already lowercase — presumably intentional
        self.assertRaises(Exception, self.s3_client.get_object, Bucket=bucket_name, Key=object_key.lower())

        self.s3_client.put_object(Bucket=bucket_name, Key=object_key, Metadata=metadata, Body='foo',
                                  Expires=datetime.datetime.now(timezone('GMT')) + datetime.timedelta(hours=1))

        # try to fetch has not been expired yet.
        resp = self.s3_client.get_object(Bucket=bucket_name, Key=object_key)
        self.assertIn('Expires', resp)

        # clean up
        self._delete_bucket(bucket_name, [object_key])
    def test_s3_presigned_url_expired(self):
        """A presigned GET URL must work before expiry, return 403 after,
        and a freshly generated longer-lived URL must work again."""
        bucket_name = 'test-bucket-%s' % short_uid()
        client = self._get_test_client()
        client.create_bucket(Bucket=bucket_name)

        # put object and CORS configuration
        object_key = 'key-by-hostname'
        client.put_object(Bucket=bucket_name, Key=object_key, Body='something')

        # get object and assert headers
        url = client.generate_presigned_url(
            'get_object', Params={'Bucket': bucket_name, 'Key': object_key}, ExpiresIn=2
        )

        # retrieving it before expiry
        resp = requests.get(url, verify=False)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(to_str(resp.content), 'something')

        # waiting for the url to expire
        time.sleep(3)
        resp = requests.get(url, verify=False)
        self.assertEqual(resp.status_code, 403)

        url = client.generate_presigned_url(
            'get_object', Params={'Bucket': bucket_name, 'Key': object_key}, ExpiresIn=120
        )

        resp = requests.get(url, verify=False)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(to_str(resp.content), 'something')

        # clean up
        self._delete_bucket(bucket_name, [object_key])
    def test_bucket_availability(self):
        """Lifecycle, replication and object-lock queries against a
        non-existent bucket must answer 'The bucket does not exist'."""
        bucket_name = 'test-bucket-lifecycle'

        returned_empty_lifecycle = s3_listener.get_lifecycle(bucket_name)
        self.assertRegexpMatches(returned_empty_lifecycle._content, r'The bucket does not exist')

        response = s3_listener.get_replication(bucket_name)
        self.assertRegexpMatches(response._content, r'The bucket does not exist')

        response = s3_listener.get_object_lock(bucket_name)
        self.assertRegexpMatches(response._content, r'The bucket does not exist')
def test_delete_bucket_lifecycle_configuration(self):
bucket_name = 'test-bucket-%s' % short_uid()
client = self._get_test_client()
client.create_bucket(Bucket=bucket_name)
lfc = {
'Rules': [
{
'Expiration': {'Days': 7},
'ID': 'wholebucket',
'Filter': {'Prefix': ''},
'Status': 'Enabled',
}
]
}
client.put_bucket_lifecycle_configuration(
Bucket=bucket_name, LifecycleConfiguration=lfc
)
result = client.get_bucket_lifecycle_configuration(Bucket=bucket_name)
self.assertIn('Rules', result)
client.delete_bucket_lifecycle(Bucket=bucket_name)
try:
client.get_bucket_lifecycle_configuration(Bucket=bucket_name)
except ClientError as e:
self.assertEqual(e.response['Error']['Code'], 'NoSuchLifecycleConfiguration')
# clean up
client.delete_bucket(Bucket=bucket_name)
def test_delete_lifecycle_configuration_on_bucket_deletion(self):
bucket_name = 'test-bucket-%s' % short_uid()
client = self._get_test_client()
client.create_bucket(Bucket=bucket_name)
lfc = {
'Rules': [
{
'Expiration': {'Days': 7},
'ID': 'wholebucket',
'Filter': {'Prefix': ''},
'Status': 'Enabled',
}
]
}
client.put_bucket_lifecycle_configuration(
Bucket=bucket_name, LifecycleConfiguration=lfc
)
result = client.get_bucket_lifecycle_configuration(Bucket=bucket_name)
self.assertIn('Rules', result)
client.delete_bucket(Bucket=bucket_name)
client.create_bucket(Bucket=bucket_name)
try:
client.get_bucket_lifecycle_configuration(Bucket=bucket_name)
except ClientError as e:
self.assertEqual(e.response['Error']['Code'], 'NoSuchLifecycleConfiguration')
# clean up
client.delete_bucket(Bucket=bucket_name)
def test_range_header_body_length(self):
# Test for https://github.com/localstack/localstack/issues/1952
object_key = 'sample.bin'
bucket_name = 'test-%s' % short_uid()
self.s3_client.create_bucket(Bucket=bucket_name)
chunk_size = 1024
with io.BytesIO() as data:
data.write(os.urandom(chunk_size * 2))
data.seek(0)
self.s3_client.upload_fileobj(data, bucket_name, object_key)
range_header = 'bytes=0-%s' % (chunk_size | |
:param cloned_requirements_count: Number of Requirments cloned so far.
:type cloned_requirements_count: int
:param cloned_shared_steps_count: Number of shared steps cloned so far.
:type cloned_shared_steps_count: int
:param cloned_test_cases_count: Number of test cases cloned so far
:type cloned_test_cases_count: int
:param total_requirements_count: Total number of requirements to be cloned
:type total_requirements_count: int
:param total_test_cases_count: Total number of test cases to be cloned
:type total_test_cases_count: int
"""
_attribute_map = {
'cloned_requirements_count': {'key': 'clonedRequirementsCount', 'type': 'int'},
'cloned_shared_steps_count': {'key': 'clonedSharedStepsCount', 'type': 'int'},
'cloned_test_cases_count': {'key': 'clonedTestCasesCount', 'type': 'int'},
'total_requirements_count': {'key': 'totalRequirementsCount', 'type': 'int'},
'total_test_cases_count': {'key': 'totalTestCasesCount', 'type': 'int'}
}
def __init__(self, cloned_requirements_count=None, cloned_shared_steps_count=None, cloned_test_cases_count=None, total_requirements_count=None, total_test_cases_count=None):
super(CloneStatistics, self).__init__()
self.cloned_requirements_count = cloned_requirements_count
self.cloned_shared_steps_count = cloned_shared_steps_count
self.cloned_test_cases_count = cloned_test_cases_count
self.total_requirements_count = total_requirements_count
self.total_test_cases_count = total_test_cases_count
class CodeCoverageData(Model):
    """Code-coverage payload for one build configuration.

    :param build_flavor: Flavor of the build the data belongs to.
    :type build_flavor: str
    :param build_platform: Platform of the build the data belongs to.
    :type build_platform: str
    :param coverage_stats: Coverage entries recorded for the build.
    :type coverage_stats: list of :class:`CodeCoverageStatistics <test.v4_1.models.CodeCoverageStatistics>`
    """

    _attribute_map = {
        'build_flavor': {'key': 'buildFlavor', 'type': 'str'},
        'build_platform': {'key': 'buildPlatform', 'type': 'str'},
        'coverage_stats': {'key': 'coverageStats', 'type': '[CodeCoverageStatistics]'}
    }

    def __init__(self, build_flavor=None, build_platform=None, coverage_stats=None):
        super(CodeCoverageData, self).__init__()
        # Copy each constructor argument onto the instance under its own name.
        for attr, value in (('build_flavor', build_flavor),
                            ('build_platform', build_platform),
                            ('coverage_stats', coverage_stats)):
            setattr(self, attr, value)
class CodeCoverageStatistics(Model):
    """One labeled coverage measurement (e.g. blocks or statements).

    :param covered: Number of covered units.
    :type covered: int
    :param delta: Change in coverage versus the delta build.
    :type delta: float
    :param is_delta_available: Whether the delta value is valid.
    :type is_delta_available: bool
    :param label: Label of the coverage data ("Blocks", "Statements", "Modules", etc.).
    :type label: str
    :param position: Ordering position of the label.
    :type position: int
    :param total: Total number of units.
    :type total: int
    """

    _attribute_map = {
        'covered': {'key': 'covered', 'type': 'int'},
        'delta': {'key': 'delta', 'type': 'float'},
        'is_delta_available': {'key': 'isDeltaAvailable', 'type': 'bool'},
        'label': {'key': 'label', 'type': 'str'},
        'position': {'key': 'position', 'type': 'int'},
        'total': {'key': 'total', 'type': 'int'}
    }

    def __init__(self, covered=None, delta=None, is_delta_available=None, label=None, position=None, total=None):
        super(CodeCoverageStatistics, self).__init__()
        # Copy each constructor argument onto the instance under its own name.
        for attr, value in (('covered', covered), ('delta', delta),
                            ('is_delta_available', is_delta_available),
                            ('label', label), ('position', position),
                            ('total', total)):
            setattr(self, attr, value)
class CodeCoverageSummary(Model):
    """Coverage summary for a build, optionally compared with another build.

    :param build: Reference to the build the data was retrieved/published for.
    :type build: :class:`ShallowReference <test.v4_1.models.ShallowReference>`
    :param coverage_data: Coverage data and details for the build.
    :type coverage_data: list of :class:`CodeCoverageData <test.v4_1.models.CodeCoverageData>`
    :param delta_build: Reference to the build the coverage difference is computed against.
    :type delta_build: :class:`ShallowReference <test.v4_1.models.ShallowReference>`
    """

    _attribute_map = {
        'build': {'key': 'build', 'type': 'ShallowReference'},
        'coverage_data': {'key': 'coverageData', 'type': '[CodeCoverageData]'},
        'delta_build': {'key': 'deltaBuild', 'type': 'ShallowReference'}
    }

    def __init__(self, build=None, coverage_data=None, delta_build=None):
        super(CodeCoverageSummary, self).__init__()
        # Copy each constructor argument onto the instance under its own name.
        for attr, value in (('build', build),
                            ('coverage_data', coverage_data),
                            ('delta_build', delta_build)):
            setattr(self, attr, value)
class CoverageStatistics(Model):
    """Raw block/line coverage counters.

    :param blocks_covered: Number of covered blocks.
    :type blocks_covered: int
    :param blocks_not_covered: Number of uncovered blocks.
    :type blocks_not_covered: int
    :param lines_covered: Number of covered lines.
    :type lines_covered: int
    :param lines_not_covered: Number of uncovered lines.
    :type lines_not_covered: int
    :param lines_partially_covered: Number of partially covered lines.
    :type lines_partially_covered: int
    """

    _attribute_map = {
        'blocks_covered': {'key': 'blocksCovered', 'type': 'int'},
        'blocks_not_covered': {'key': 'blocksNotCovered', 'type': 'int'},
        'lines_covered': {'key': 'linesCovered', 'type': 'int'},
        'lines_not_covered': {'key': 'linesNotCovered', 'type': 'int'},
        'lines_partially_covered': {'key': 'linesPartiallyCovered', 'type': 'int'}
    }

    def __init__(self, blocks_covered=None, blocks_not_covered=None, lines_covered=None, lines_not_covered=None, lines_partially_covered=None):
        super(CoverageStatistics, self).__init__()
        # Copy each constructor argument onto the instance under its own name.
        for attr, value in (('blocks_covered', blocks_covered),
                            ('blocks_not_covered', blocks_not_covered),
                            ('lines_covered', lines_covered),
                            ('lines_not_covered', lines_not_covered),
                            ('lines_partially_covered', lines_partially_covered)):
            setattr(self, attr, value)
class CustomTestField(Model):
    """A single custom field name/value pair attached to a test result.

    :param field_name: Name of the custom field.
    :type field_name: str
    :param value: Value of the custom field.
    :type value: object
    """

    _attribute_map = {
        'field_name': {'key': 'fieldName', 'type': 'str'},
        'value': {'key': 'value', 'type': 'object'}
    }

    def __init__(self, field_name=None, value=None):
        super(CustomTestField, self).__init__()
        # Copy each constructor argument onto the instance under its own name.
        for attr, val in (('field_name', field_name), ('value', value)):
            setattr(self, attr, val)
class CustomTestFieldDefinition(Model):
    """Definition (id, name, type, scope) of a custom test field.

    :param field_id: Identifier of the custom field.
    :type field_id: int
    :param field_name: Name of the custom field.
    :type field_name: str
    :param field_type: Type of the custom field.
    :type field_type: object
    :param scope: Scope the custom field applies to.
    :type scope: object
    """

    _attribute_map = {
        'field_id': {'key': 'fieldId', 'type': 'int'},
        'field_name': {'key': 'fieldName', 'type': 'str'},
        'field_type': {'key': 'fieldType', 'type': 'object'},
        'scope': {'key': 'scope', 'type': 'object'}
    }

    def __init__(self, field_id=None, field_name=None, field_type=None, scope=None):
        super(CustomTestFieldDefinition, self).__init__()
        # Copy each constructor argument onto the instance under its own name.
        for attr, value in (('field_id', field_id),
                            ('field_name', field_name),
                            ('field_type', field_type),
                            ('scope', scope)):
            setattr(self, attr, value)
class DtlEnvironmentDetails(Model):
    """DevTest Lab environment details (ARM template content and parameters).

    :param csm_content: ARM (CSM) template content.
    :type csm_content: str
    :param csm_parameters: ARM (CSM) template parameters.
    :type csm_parameters: str
    :param subscription_name: Name of the Azure subscription.
    :type subscription_name: str
    """

    _attribute_map = {
        'csm_content': {'key': 'csmContent', 'type': 'str'},
        'csm_parameters': {'key': 'csmParameters', 'type': 'str'},
        'subscription_name': {'key': 'subscriptionName', 'type': 'str'}
    }

    def __init__(self, csm_content=None, csm_parameters=None, subscription_name=None):
        super(DtlEnvironmentDetails, self).__init__()
        # Copy each constructor argument onto the instance under its own name.
        for attr, value in (('csm_content', csm_content),
                            ('csm_parameters', csm_parameters),
                            ('subscription_name', subscription_name)):
            setattr(self, attr, value)
class FailingSince(Model):
    """Point (build/release/date) since which a test has been failing.

    :param build: Build in which the failure first appeared.
    :type build: :class:`BuildReference <test.v4_1.models.BuildReference>`
    :param date: Timestamp of the first failure.
    :type date: datetime
    :param release: Release in which the failure first appeared.
    :type release: :class:`ReleaseReference <test.v4_1.models.ReleaseReference>`
    """

    _attribute_map = {
        'build': {'key': 'build', 'type': 'BuildReference'},
        'date': {'key': 'date', 'type': 'iso-8601'},
        'release': {'key': 'release', 'type': 'ReleaseReference'}
    }

    def __init__(self, build=None, date=None, release=None):
        super(FailingSince, self).__init__()
        # Copy each constructor argument onto the instance under its own name.
        for attr, value in (('build', build), ('date', date),
                            ('release', release)):
            setattr(self, attr, value)
class FieldDetailsForTestResults(Model):
    """Grouping information for test results grouped by a field.

    :param field_name: Name of the group-by field.
    :type field_name: str
    :param groups_for_field: Values of the group-by field.
    :type groups_for_field: list of object
    """

    _attribute_map = {
        'field_name': {'key': 'fieldName', 'type': 'str'},
        'groups_for_field': {'key': 'groupsForField', 'type': '[object]'}
    }

    def __init__(self, field_name=None, groups_for_field=None):
        super(FieldDetailsForTestResults, self).__init__()
        # Copy each constructor argument onto the instance under its own name.
        for attr, value in (('field_name', field_name),
                            ('groups_for_field', groups_for_field)):
            setattr(self, attr, value)
class FunctionCoverage(Model):
    """Coverage recorded for a single function.

    :param class_: Name of the containing class ('class' on the wire).
    :type class_: str
    :param name: Name of the function.
    :type name: str
    :param namespace: Namespace the function belongs to.
    :type namespace: str
    :param source_file: Source file the function is defined in.
    :type source_file: str
    :param statistics: Coverage counters for the function.
    :type statistics: :class:`CoverageStatistics <test.v4_1.models.CoverageStatistics>`
    """

    _attribute_map = {
        'class_': {'key': 'class', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'namespace': {'key': 'namespace', 'type': 'str'},
        'source_file': {'key': 'sourceFile', 'type': 'str'},
        'statistics': {'key': 'statistics', 'type': 'CoverageStatistics'}
    }

    def __init__(self, class_=None, name=None, namespace=None, source_file=None, statistics=None):
        super(FunctionCoverage, self).__init__()
        # Copy each constructor argument onto the instance under its own name.
        for attr, value in (('class_', class_), ('name', name),
                            ('namespace', namespace),
                            ('source_file', source_file),
                            ('statistics', statistics)):
            setattr(self, attr, value)
class GraphSubjectBase(Model):
    """Common base for graph subjects (users, groups, service principals).

    :param _links: Zero or more links about the graph subject; may be invoked to obtain additional relationships or more detailed information about it.
    :type _links: :class:`ReferenceLinks <microsoft.-visual-studio.-services.-web-api.v4_1.models.ReferenceLinks>`
    :param descriptor: Primary way to reference the graph subject while the system is running; uniquely identifies the subject across Accounts and Organizations.
    :type descriptor: str
    :param display_name: Non-unique display name of the graph subject; changed by altering its value in the source provider.
    :type display_name: str
    :param url: Full route to the source resource of this graph subject.
    :type url: str
    """

    _attribute_map = {
        '_links': {'key': '_links', 'type': 'ReferenceLinks'},
        'descriptor': {'key': 'descriptor', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'}
    }

    def __init__(self, _links=None, descriptor=None, display_name=None, url=None):
        super(GraphSubjectBase, self).__init__()
        # Copy each constructor argument onto the instance under its own name.
        for attr, value in (('_links', _links), ('descriptor', descriptor),
                            ('display_name', display_name), ('url', url)):
            setattr(self, attr, value)
class IdentityRef(GraphSubjectBase):
    """Reference to an identity, extending GraphSubjectBase with legacy fields.

    :param _links: Zero or more links about the graph subject; may be invoked to obtain additional relationships or more detailed information about it.
    :type _links: :class:`ReferenceLinks <microsoft.-visual-studio.-services.-web-api.v4_1.models.ReferenceLinks>`
    :param descriptor: Primary way to reference the graph subject while the system is running; uniquely identifies the subject across Accounts and Organizations.
    :type descriptor: str
    :param display_name: Non-unique display name of the graph subject; changed by altering its value in the source provider.
    :type display_name: str
    :param url: Full route to the source resource of this graph subject.
    :type url: str
    :param directory_alias: Alias of the identity in its directory.
    :type directory_alias: str
    :param id: Identifier of the identity.
    :type id: str
    :param image_url: URL of the identity's avatar image.
    :type image_url: str
    :param inactive: Whether the identity is inactive.
    :type inactive: bool
    :param is_aad_identity: Whether the identity comes from Azure AD.
    :type is_aad_identity: bool
    :param is_container: Whether the identity is a container (group).
    :type is_container: bool
    :param profile_url: URL of the identity's profile.
    :type profile_url: str
    :param unique_name: Unique name of the identity.
    :type unique_name: str
    """

    _attribute_map = {
        '_links': {'key': '_links', 'type': 'ReferenceLinks'},
        'descriptor': {'key': 'descriptor', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'directory_alias': {'key': 'directoryAlias', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'image_url': {'key': 'imageUrl', 'type': 'str'},
        'inactive': {'key': 'inactive', 'type': 'bool'},
        'is_aad_identity': {'key': 'isAadIdentity', 'type': 'bool'},
        'is_container': {'key': 'isContainer', 'type': 'bool'},
        'profile_url': {'key': 'profileUrl', 'type': 'str'},
        'unique_name': {'key': 'uniqueName', 'type': 'str'}
    }

    def __init__(self, _links=None, descriptor=None, display_name=None, url=None, directory_alias=None, id=None, image_url=None, inactive=None, is_aad_identity=None, is_container=None, profile_url=None, unique_name=None):
        # The four common fields are handled by the base class.
        super(IdentityRef, self).__init__(_links=_links, descriptor=descriptor, display_name=display_name, url=url)
        # Copy the identity-specific arguments onto the instance.
        for attr, value in (('directory_alias', directory_alias), ('id', id),
                            ('image_url', image_url), ('inactive', inactive),
                            ('is_aad_identity', is_aad_identity),
                            ('is_container', is_container),
                            ('profile_url', profile_url),
                            ('unique_name', unique_name)):
            setattr(self, attr, value)
class LastResultDetails(Model):
"""LastResultDetails.
:param date_completed:
:type date_completed: datetime
:param duration:
:type duration: long
:param run_by:
:type run_by: :class:`IdentityRef <test.v4_1.models.IdentityRef>`
"""
_attribute_map = {
'date_completed': {'key': 'dateCompleted', 'type': 'iso-8601'},
'duration': {'key': | |
del_objects=del_objects)
def delete_object(self, container, obj):
"""
Deletes the object from the specified container.
The 'obj' parameter can either be the name of the object, or a
StorageObject representing the object to be deleted.
"""
return self._manager.delete_object(container, obj)
def copy_object(self, container, obj, new_container, new_obj_name=None,
content_type=None, extra_info=None):
"""
Copies the object to the new container, optionally giving it a new name.
If you copy to the same container, you must supply a different name.
You can optionally change the content_type of the object by supplying
that in the 'content_type' parameter.
The 'extra_info' parameter is included for backwards compatibility. It
is no longer used at all, and will not be modified with swiftclient
info, since swiftclient is not used any more.
"""
return self._manager.copy_object(container, obj, new_container,
new_obj_name=new_obj_name, content_type=content_type)
def move_object(self, container, obj, new_container, new_obj_name=None,
new_reference=False, content_type=None, extra_info=None):
"""
Works just like copy_object, except that the source object is deleted
after a successful copy.
You can optionally change the content_type of the object by supplying
that in the 'content_type' parameter.
NOTE: any references to the original object will no longer be valid;
you will have to get a reference to the new object by passing True for
the 'new_reference' parameter. When this is True, a reference to the
newly moved object is returned. Otherwise, the etag for the moved
object is returned.
The 'extra_info' parameter is included for backwards compatibility. It
is no longer used at all, and will not be modified with swiftclient
info, since swiftclient is not used any more.
"""
return self._manager.move_object(container, obj, new_container,
new_obj_name=new_obj_name, new_reference=new_reference,
content_type=content_type)
def change_object_content_type(self, container, obj, new_ctype,
guess=False, extra_info=None):
"""
Copies object to itself, but applies a new content-type. The guess
feature requires the container to be CDN-enabled. If not then the
content-type must be supplied. If using guess with a CDN-enabled
container, new_ctype can be set to None. Failure during the put will
result in a swift exception.
The 'extra_info' parameter is included for backwards compatibility. It
is no longer used at all, and will not be modified with swiftclient
info, since swiftclient is not used any more.
"""
return self._manager.change_object_content_type(container, obj,
new_ctype, guess=guess)
def upload_folder(self, folder_path, container=None, ignore=None, ttl=None):
"""
Convenience method for uploading an entire folder, including any
sub-folders, to Cloud Files.
All files will be uploaded to objects with the same name as the file.
In the case of nested folders, files will be named with the full path
relative to the base folder. E.g., if the folder you specify contains a
folder named 'docs', and 'docs' contains a file named 'install.html',
that file will be uploaded to an object named 'docs/install.html'.
If 'container' is specified, the folder's contents will be uploaded to
that container. If it is not specified, a new container with the same
name as the specified folder will be created, and the files uploaded to
this new container.
You can selectively ignore files by passing either a single pattern or
a list of patterns; these will be applied to the individual folder and
file names, and any names that match any of the 'ignore' patterns will
not be uploaded. The patterns should be standard *nix-style shell
patterns; e.g., '*pyc' will ignore all files ending in 'pyc', such as
'program.pyc' and 'abcpyc'.
The upload will happen asynchronously; in other words, the call to
upload_folder() will generate a UUID and return a 2-tuple of (UUID,
total_bytes) immediately. Uploading will happen in the background; your
app can call get_uploaded(uuid) to get the current status of the
upload. When the upload is complete, the value returned by
get_uploaded(uuid) will match the total_bytes for the upload.
If you start an upload and need to cancel it, call
cancel_folder_upload(uuid), passing the uuid returned by the initial
call. It will then be up to you to either keep or delete the
partially-uploaded content.
If you specify a `ttl` parameter, the uploaded files will be deleted
after that number of seconds.
"""
if not os.path.isdir(folder_path):
raise exc.FolderNotFound("No such folder: '%s'" % folder_path)
ignore = utils.coerce_to_list(ignore)
total_bytes = utils.folder_size(folder_path, ignore)
upload_key = str(<KEY>())
self.folder_upload_status[upload_key] = {"continue": True,
"total_bytes": total_bytes,
"uploaded": 0,
}
self._upload_folder_in_background(folder_path, container, ignore,
upload_key, ttl)
return (upload_key, total_bytes)
def _upload_folder_in_background(self, folder_path, container, ignore,
upload_key, ttl=None):
"""Runs the folder upload in the background."""
uploader = FolderUploader(folder_path, container, ignore, upload_key,
self, ttl=ttl)
uploader.start()
    def sync_folder_to_container(self, folder_path, container, delete=False,
            include_hidden=False, ignore=None, ignore_timestamps=False,
            object_prefix="", verbose=False):
        """
        Compares the contents of the specified folder, and checks to make sure
        that the corresponding object is present in the specified container. If
        there is no remote object matching the local file, it is created. If a
        matching object exists, the etag is examined to determine if the object
        in the container matches the local file; if they differ, the container
        is updated with the local file if the local file is newer when
        `ignore_timestamps' is False (default). If `ignore_timestamps` is True,
        the object is overwritten with the local file contents whenever the
        etags differ. NOTE: the timestamp of a remote object is the time it was
        uploaded, not the original modification time of the file stored in that
        object. Unless 'include_hidden' is True, files beginning with an
        initial period are ignored.

        If the 'delete' option is True, any objects in the container that do
        not have corresponding files in the local folder are deleted.

        You can selectively ignore files by passing either a single pattern or
        a list of patterns; these will be applied to the individual folder and
        file names, and any names that match any of the 'ignore' patterns will
        not be uploaded. The patterns should be standard *nix-style shell
        patterns; e.g., '*pyc' will ignore all files ending in 'pyc', such as
        'program.pyc' and 'abcpyc'.

        If `object_prefix` is set it will be appended to the object name when
        it is checked and uploaded to the container. For example, if you use
        sync_folder_to_container("folderToSync/", myContainer,
        object_prefix="imgFolder") it will upload the files to the
        container/imgFolder/... instead of just container/...

        Set `verbose` to True to make it print what is going on. It will
        show which files are being uploaded and which ones are not and why.
        """
        cont = self.get_container(container)
        # Instance state shared with the recursive _sync_folder_to_container()
        # helper: the list of local object names seen during this sync.
        self._local_files = []
        # Load a list of all the remote objects so we don't have to keep
        # hitting the service
        if verbose:
            log = logging.getLogger("pyrax")
            log.info("Loading remote object list (prefix=%s)", object_prefix)
        data = cont.get_objects(prefix=object_prefix, full_listing=True)
        # Cache of remote objects keyed by name, consumed by the helper.
        self._remote_files = dict((d.name, d) for d in data)
        # Per-run counters; the recursive helper increments these in place.
        self._sync_summary = {"total": 0,
                "uploaded": 0,
                "ignored": 0,
                "older": 0,
                "duplicate": 0,
                "failed": 0,
                "failure_reasons": [],
                "deleted": 0,
                }
        self._sync_folder_to_container(folder_path, cont, prefix="",
                delete=delete, include_hidden=include_hidden, ignore=ignore,
                ignore_timestamps=ignore_timestamps,
                object_prefix=object_prefix, verbose=verbose)
        # Unset the _remote_files
        self._remote_files = None
        if verbose:
            # Log the summary. NOTE(review): `log` is only bound inside the
            # earlier `if verbose:` branch; this block is guarded by the same
            # flag, so the name is always defined here.
            summary = self._sync_summary
            log.info("Folder sync completed at %s" % time.ctime())
            log.info("  Total files processed: %s" % summary["total"])
            log.info("  Number Uploaded: %s" % summary["uploaded"])
            log.info("  Number Ignored: %s" % summary["ignored"])
            log.info("  Number Skipped (older): %s" % summary["older"])
            log.info("  Number Skipped (dupe): %s" % summary["duplicate"])
            log.info("  Number Deleted: %s" % summary["deleted"])
            log.info("  Number Failed: %s" % summary["failed"])
            if summary["failed"]:
                for reason in summary["failure_reasons"]:
                    log.info("  Reason: %s" % reason)
def _sync_folder_to_container(self, folder_path, container, prefix, delete,
include_hidden, ignore, ignore_timestamps, object_prefix, verbose):
"""
This is the internal method that is called recursively to handle
nested folder structures.
"""
fnames = os.listdir(folder_path)
ignore = utils.coerce_to_list(ignore)
log = logging.getLogger("pyrax")
if not include_hidden:
ignore.append(".*")
for fname in fnames:
if utils.match_pattern(fname, ignore):
self._sync_summary["ignored"] += 1
continue
pth = os.path.join(folder_path, fname)
if os.path.isdir(pth):
subprefix = fname
if prefix:
subprefix = os.path.join(prefix, subprefix)
self._sync_folder_to_container(pth, container, prefix=subprefix,
delete=delete, include_hidden=include_hidden,
ignore=ignore, ignore_timestamps=ignore_timestamps,
object_prefix=object_prefix, verbose=verbose)
continue
self._local_files.append(os.path.join(object_prefix, prefix,
fname))
local_etag = utils.get_checksum(pth)
if object_prefix:
prefix = os.path.join(object_prefix, prefix)
object_prefix = ""
fullname_with_prefix = os.path.join(prefix, fname)
try:
obj = self._remote_files[fullname_with_prefix]
obj_etag = obj.etag
except KeyError:
obj = None
obj_etag = None
if local_etag != obj_etag:
if not ignore_timestamps:
if obj:
obj_time_str = | |
<filename>qiskit/providers/ibmq/experiment/ibm_experiment_service.py<gh_stars>100-1000
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""IBM Quantum experiment service."""
import logging
import json
import copy
from typing import Optional, List, Dict, Union, Tuple, Any, Type
from datetime import datetime
from collections import defaultdict
from qiskit.providers.ibmq import accountprovider # pylint: disable=unused-import
from qiskit.providers.exceptions import QiskitBackendNotFoundError
from .constants import (ExperimentShareLevel, ResultQuality,
RESULT_QUALITY_FROM_API, RESULT_QUALITY_TO_API)
from .utils import map_api_error
from .device_component import DeviceComponent
from ..utils.converters import local_to_utc_str, utc_to_local
from ..api.clients.experiment import ExperimentClient
from ..api.exceptions import RequestsApiError
from ..ibmqbackend import IBMQRetiredBackend
from ..exceptions import IBMQApiError
from ..credentials import store_preferences
logger = logging.getLogger(__name__)
class IBMExperimentService:
"""Provides experiment related services.
This class is the main interface to invoke IBM Quantum
experiment service, which allows you to create, delete, update, query, and
retrieve experiments, experiment figures, and analysis results. The
``experiment`` attribute of
:class:`~qiskit.providers.ibmq.accountprovider.AccountProvider` is an
instance of this class, and the main syntax for using the service is
``provider.experiment.<action>``. For example::
from qiskit import IBMQ
provider = IBMQ.load_account()
# Retrieve all experiments.
experiments = provider.experiment.experiments()
# Retrieve experiments with filtering.
experiment_filtered = provider.experiment.experiments(backend_name='ibmq_athens')
# Retrieve a specific experiment using its ID.
experiment = provider.experiment.experiment(EXPERIMENT_ID)
# Upload a new experiment.
new_experiment_id = provider.experiment.create_experiment(
experiment_type="T1",
backend_name="ibmq_athens",
metadata={"qubits": 5}
)
# Update an experiment.
provider.experiment.update_experiment(
experiment_id=EXPERIMENT_ID,
share_level="Group"
)
# Delete an experiment.
provider.experiment.delete_experiment(EXPERIMENT_ID)
Similar syntax applies to analysis results and experiment figures.
"""
_default_preferences = {"auto_save": False}
def __init__(
self,
provider: 'accountprovider.AccountProvider'
) -> None:
"""IBMExperimentService constructor.
Args:
provider: IBM Quantum Experience account provider.
"""
super().__init__()
self._provider = provider
self._api_client = ExperimentClient(provider.credentials)
self._preferences = copy.deepcopy(self._default_preferences)
self._preferences.update(provider.credentials.preferences.get('experiments', {}))
def backends(self) -> List[Dict]:
"""Return a list of backends that can be used for experiments.
Returns:
A list of backends.
"""
return self._api_client.experiment_devices()
def create_experiment(
self,
experiment_type: str,
backend_name: str,
metadata: Optional[Dict] = None,
experiment_id: Optional[str] = None,
parent_id: Optional[str] = None,
job_ids: Optional[List[str]] = None,
tags: Optional[List[str]] = None,
notes: Optional[str] = None,
share_level: Optional[Union[str, ExperimentShareLevel]] = None,
start_datetime: Optional[Union[str, datetime]] = None,
json_encoder: Type[json.JSONEncoder] = json.JSONEncoder,
**kwargs: Any
) -> str:
"""Create a new experiment in the database.
Args:
experiment_type: Experiment type.
backend_name: Name of the backend the experiment ran on.
metadata: Experiment metadata.
experiment_id: Experiment ID. It must be in the ``uuid4`` format.
One will be generated if not supplied.
parent_id: The experiment ID of the parent experiment.
The parent experiment must exist, must be on the same backend as the child,
and an experiment cannot be its own parent.
job_ids: IDs of experiment jobs.
tags: Tags to be associated with the experiment.
notes: Freeform notes about the experiment.
share_level: The level at which the experiment is shared. This determines who can
view the experiment (but not update it). This defaults to "private"
for new experiments. Possible values include:
- private: The experiment is only visible to its owner (default)
- project: The experiment is shared within its project
- group: The experiment is shared within its group
- hub: The experiment is shared within its hub
- public: The experiment is shared publicly regardless of provider
start_datetime: Timestamp when the experiment started, in local time zone.
json_encoder: Custom JSON encoder to use to encode the experiment.
kwargs: Additional experiment attributes that are not supported and will be ignored.
Returns:
Experiment ID.
Raises:
IBMExperimentEntryExists: If the experiment already exits.
IBMQApiError: If the request to the server failed.
"""
# pylint: disable=arguments-differ
if kwargs:
logger.info("Keywords %s are not supported by IBM Quantum experiment service "
"and will be ignored.",
kwargs.keys())
data = {
'type': experiment_type,
'device_name': backend_name,
'hub_id': self._provider.credentials.hub,
'group_id': self._provider.credentials.group,
'project_id': self._provider.credentials.project
}
data.update(self._experiment_data_to_api(metadata=metadata,
experiment_id=experiment_id,
parent_id=parent_id,
job_ids=job_ids,
tags=tags,
notes=notes,
share_level=share_level,
start_dt=start_datetime))
with map_api_error(f"Experiment {experiment_id} already exists."):
response_data = self._api_client.experiment_upload(json.dumps(data, cls=json_encoder))
return response_data['uuid']
def update_experiment(
self,
experiment_id: str,
metadata: Optional[Dict] = None,
job_ids: Optional[List[str]] = None,
notes: Optional[str] = None,
tags: Optional[List[str]] = None,
share_level: Optional[Union[str, ExperimentShareLevel]] = None,
end_datetime: Optional[Union[str, datetime]] = None,
json_encoder: Type[json.JSONEncoder] = json.JSONEncoder,
**kwargs: Any,
) -> None:
"""Update an existing experiment.
Args:
experiment_id: Experiment ID.
metadata: Experiment metadata.
job_ids: IDs of experiment jobs.
notes: Freeform notes about the experiment.
tags: Tags to be associated with the experiment.
share_level: The level at which the experiment is shared. This determines who can
view the experiment (but not update it). This defaults to "private"
for new experiments. Possible values include:
- private: The experiment is only visible to its owner (default)
- project: The experiment is shared within its project
- group: The experiment is shared within its group
- hub: The experiment is shared within its hub
- public: The experiment is shared publicly regardless of provider
end_datetime: Timestamp for when the experiment ended, in local time.
json_encoder: Custom JSON encoder to use to encode the experiment.
kwargs: Additional experiment attributes that are not supported and will be ignored.
Raises:
IBMExperimentEntryNotFound: If the experiment does not exist.
IBMQApiError: If the request to the server failed.
"""
# pylint: disable=arguments-differ
if kwargs:
logger.info("Keywords %s are not supported by IBM Quantum experiment service "
"and will be ignored.",
kwargs.keys())
data = self._experiment_data_to_api(metadata=metadata,
job_ids=job_ids,
tags=tags,
notes=notes,
share_level=share_level,
end_dt=end_datetime)
if not data:
logger.warning("update_experiment() called with nothing to update.")
return
with map_api_error(f"Experiment {experiment_id} not found."):
self._api_client.experiment_update(experiment_id, json.dumps(data, cls=json_encoder))
def _experiment_data_to_api(
    self,
    metadata: Optional[Dict] = None,
    experiment_id: Optional[str] = None,
    parent_id: Optional[str] = None,
    job_ids: Optional[List[str]] = None,
    tags: Optional[List[str]] = None,
    notes: Optional[str] = None,
    share_level: Optional[Union[str, ExperimentShareLevel]] = None,
    start_dt: Optional[Union[str, datetime]] = None,
    end_dt: Optional[Union[str, datetime]] = None,
) -> Dict:
    """Translate experiment attributes into an API request payload.

    Only truthy attributes are included in the payload; everything else is
    simply omitted from the request.

    Args:
        metadata: Experiment metadata.
        experiment_id: Experiment ID.
        parent_id: Parent experiment ID.
        job_ids: IDs of experiment jobs.
        tags: Tags to be associated with the experiment.
        notes: Freeform notes about the experiment.
        share_level: The level at which the experiment is shared.
        start_dt: Experiment start time.
        end_dt: Experiment end time.

    Returns:
        API request data.
    """
    data = {}  # type: Dict[str, Any]
    for api_key, attr in (('extra', metadata),
                          ('uuid', experiment_id),
                          ('parent_experiment_uuid', parent_id)):
        if attr:
            data[api_key] = attr
    if share_level:
        level = share_level
        if isinstance(level, str):
            # The API expects the canonical lower-case enum value.
            level = ExperimentShareLevel(level.lower())
        data['visibility'] = level.value
    for api_key, attr in (('tags', tags), ('jobs', job_ids), ('notes', notes)):
        if attr:
            data[api_key] = attr
    # Timestamps are converted from local time to UTC strings for the server.
    if start_dt:
        data['start_time'] = local_to_utc_str(start_dt)
    if end_dt:
        data['end_time'] = local_to_utc_str(end_dt)
    return data
def experiment(
    self,
    experiment_id: str,
    json_decoder: Type[json.JSONDecoder] = json.JSONDecoder
) -> Dict:
    """Fetch a previously stored experiment by its ID.

    Args:
        experiment_id: Experiment ID.
        json_decoder: Custom JSON decoder used to decode the retrieved
            experiment.

    Returns:
        Retrieved experiment data.

    Raises:
        IBMExperimentEntryNotFound: If the experiment does not exist.
        IBMQApiError: If the request to the server failed.
    """
    with map_api_error(f"Experiment {experiment_id} not found."):
        raw = self._api_client.experiment_get(experiment_id)
    return self._api_to_experiment_data(json.loads(raw, cls=json_decoder))
def experiments(
self,
limit: Optional[int] = 10,
json_decoder: Type[json.JSONDecoder] = json.JSONDecoder,
device_components: Optional[List[Union[str, DeviceComponent]]] = None,
device_components_operator: Optional[str] = None,
experiment_type: Optional[str] = None,
experiment_type_operator: Optional[str] = None,
backend_name: Optional[str] = None,
tags: Optional[List[str]] = None,
tags_operator: Optional[str] = "OR",
start_datetime_after: Optional[datetime] = None,
start_datetime_before: Optional[datetime] = None,
hub: Optional[str] = None,
group: Optional[str] = None,
project: Optional[str] = None,
exclude_public: Optional[bool] = False,
public_only: Optional[bool] = False,
exclude_mine: Optional[bool] = False,
mine_only: Optional[bool] = False,
parent_id: Optional[str] = None,
sort_by: Optional[Union[str, List[str]]] = None,
**filters: Any
) -> List[Dict]:
"""Retrieve all experiments, with optional filtering.
By default, results returned are as inclusive as possible. For example,
if you don't specify any filters, all experiments visible to you
are returned. This includes your own experiments as well as
those shared with you, from all providers you have access to
(not just from the provider you used to invoke this experiment service).
Args:
limit: Number of experiments to retrieve. ``None`` indicates no limit.
json_decoder: Custom JSON decoder to | |
self._md_template.translate_cols_dict.values():
handler = (
lambda x: self._md_template.str_cols_handlers[key][x])
key = "%s_id" % key
# Check if we have either to query the table with required columns
# or the dynamic table
if key in get_table_cols(self._table, conn_handler):
result = conn_handler.execute_fetchone(
"SELECT {0} FROM qiita.{1} WHERE {2}=%s AND "
"sample_id=%s".format(key, self._table, self._id_column),
(self._md_template.id, self._id))[0]
return handler(result)
else:
return conn_handler.execute_fetchone(
"SELECT {0} FROM qiita.{1} WHERE "
"sample_id=%s".format(key, self._dynamic_table),
(self._id, ))[0]
else:
# The key is not available for the sample, so raise a KeyError
raise KeyError("Metadata category %s does not exists for sample %s"
" in template %d" %
(key, self._id, self._md_template.id))
def __setitem__(self, key, value):
    r"""Set the metadata value for the category `key`.

    Not supported: sample metadata is read-only through this interface.

    Parameters
    ----------
    key : str
        The metadata category
    value : obj
        The new value for the category

    Raises
    ------
    QiitaDBNotImplementedError
        Always; updating a sample's metadata is not implemented here
    """
    raise QiitaDBNotImplementedError()
def __delitem__(self, key):
    r"""Remove the metadata category `key` from the sample.

    Not supported: sample metadata is read-only through this interface.

    Parameters
    ----------
    key : str
        The metadata category to remove

    Raises
    ------
    QiitaDBNotImplementedError
        Always; deleting a sample's metadata is not implemented here
    """
    raise QiitaDBNotImplementedError()
def __iter__(self):
    r"""Return an iterator over the sample's metadata categories.

    Returns
    -------
    Iterator
        Iterator over the metadata category names

    See Also
    --------
    keys
    """
    # Categories are fetched from the database on every iteration request.
    return iter(self._get_categories(SQLConnectionHandler()))
def __contains__(self, key):
    r"""Check whether the metadata category `key` exists for this sample.

    Parameters
    ----------
    key : str
        The metadata category

    Returns
    -------
    bool
        True if the metadata category `key` is present, False otherwise
    """
    # Categories are stored lower-cased, so normalize before the lookup.
    categories = self._get_categories(SQLConnectionHandler())
    return key.lower() in categories
def keys(self):
    r"""Return an iterator over the metadata categories.

    Returns
    -------
    Iterator
        Iterator over the metadata category names

    See Also
    --------
    __iter__
    """
    # Delegates to __iter__ via the iterator protocol.
    return iter(self)
def values(self):
    r"""Return the metadata values, in metadata category order.

    Returns
    -------
    Iterator
        View over the metadata values
    """
    return self._to_dict().values()
def items(self):
    r"""Return (category, value) pairs for the sample's metadata.

    Returns
    -------
    Iterator
        View over (category, value) tuples
    """
    return self._to_dict().items()
def get(self, key):
    r"""Return the metadata value for category `key`, or None if missing.

    Parameters
    ----------
    key : str
        The metadata category

    Returns
    -------
    Obj or None
        The value for category `key`, or None when the category is absent

    See Also
    --------
    __getitem__
    """
    # EAFP: attempt the lookup and translate a miss into None.
    value = None
    try:
        value = self[key]
    except KeyError:
        pass
    return value
class PrepSample(BaseSample):
    r"""Models a single sample that belongs to a PrepTemplate.

    See Also
    --------
    BaseSample
    Sample
    """
    # SQL bookkeeping shared with the BaseSample machinery.
    _table = "common_prep_info"
    _table_prefix = "prep_"
    _column_table = "prep_columns"
    _id_column = "prep_template_id"

    def _check_template_class(self, md_template):
        r"""Verify that `md_template` is a PrepTemplate instance.

        Parameters
        ----------
        md_template : PrepTemplate
            The metadata template to validate

        Raises
        ------
        IncompetentQiitaDeveloperError
            If `md_template` is not a PrepTemplate object
        """
        if isinstance(md_template, PrepTemplate):
            return
        raise IncompetentQiitaDeveloperError()
class Sample(BaseSample):
    r"""Class that models a sample present in a SampleTemplate.

    See Also
    --------
    BaseSample
    PrepSample
    """
    # SQL bookkeeping shared with the BaseSample machinery.
    _table = "required_sample_info"
    _table_prefix = "sample_"
    _column_table = "study_sample_columns"
    _id_column = "study_id"

    def _check_template_class(self, md_template):
        r"""Checks that md_template is of the correct type

        Parameters
        ----------
        md_template : SampleTemplate
            The metadata template

        Raises
        ------
        IncompetentQiitaDeveloperError
            If `md_template` is not a SampleTemplate object
        """
        if not isinstance(md_template, SampleTemplate):
            raise IncompetentQiitaDeveloperError()

    def __setitem__(self, column, value):
        r"""Sets the metadata value for the category `column`

        Parameters
        ----------
        column : str
            The column to update
        value : str
            The value to set. This is expected to be a str on the assumption
            that psycopg2 will cast as necessary when updating.

        Raises
        ------
        QiitaDBColumnError
            If the column does not exist in the table
        ValueError
            If the type of `value` does not match the DB column type
        """
        conn_handler = SQLConnectionHandler()
        # NOTE: identifiers (table/column names) cannot be bound as query
        # parameters, so they are interpolated into the SQL below. `column`
        # ultimately comes from the caller, so this remains a SQL-injection
        # hazard worth revisiting.
        # Does the column live in the per-template dynamic table?
        exists_dynamic = conn_handler.execute_fetchone("""
            SELECT EXISTS (
                SELECT column_name
                FROM information_schema.columns
                WHERE table_name='{0}'
                    AND table_schema='qiita'
                    AND column_name='{1}')""".format(self._dynamic_table,
                                                     column))[0]
        # ...or in the shared required_sample_info table?
        exists_required = conn_handler.execute_fetchone("""
            SELECT EXISTS (
                SELECT column_name
                FROM information_schema.columns
                WHERE table_name='required_sample_info'
                    AND table_schema='qiita'
                    AND column_name='{0}')""".format(column))[0]

        if exists_dynamic:
            # Catching the error so we can check whether it was caused by a
            # column-type mismatch or by something else.
            try:
                conn_handler.execute("""
                    UPDATE qiita.{0}
                    SET {1}=%s
                    WHERE sample_id=%s""".format(self._dynamic_table,
                                                 column), (value, self._id))
            except Exception as e:
                # BUG FIX: constrain the data_type lookup to this sample's
                # dynamic table. The previous query filtered only on
                # column_name and table_schema, so a same-named column from
                # any other table in the qiita schema could be returned.
                column_type = conn_handler.execute_fetchone("""
                    SELECT data_type
                    FROM information_schema.columns
                    WHERE column_name=%s AND table_schema='qiita'
                        AND table_name=%s
                    """, (column, self._dynamic_table))[0]
                # NOTE(review): this compares a Python type name (e.g. 'str')
                # with a PostgreSQL data_type (e.g. 'character varying'), so
                # the ValueError branch is the common outcome -- confirm the
                # intended semantics.
                value_type = type(value).__name__
                if column_type != value_type:
                    raise ValueError(
                        'The new value being added to column: "{0}" is "{1}" '
                        '(type: "{2}"). However, this column in the DB is of '
                        'type "{3}". Please change the value in your updated '
                        'template or reprocess your sample template.'.format(
                            column, value, value_type, column_type))
                else:
                    raise e
        elif exists_required:
            # The required fields have an explicit type check elsewhere, so
            # no type verification is needed here.
            conn_handler.execute("""
                UPDATE qiita.required_sample_info
                SET {0}=%s
                WHERE sample_id=%s
                """.format(column), (value, self._id))
        else:
            raise QiitaDBColumnError("Column %s does not exist in %s" %
                                     (column, self._dynamic_table))
class MetadataTemplate(QiitaObject):
r"""Metadata map object that accesses the db to get the sample/prep
template information
Attributes
----------
id
Methods
-------
create
exists
__len__
__getitem__
__setitem__
__delitem__
__iter__
__contains__
keys
values
items
get
to_file
add_filepath
See Also
--------
QiitaObject
SampleTemplate
PrepTemplate
"""
# Used to find the right SQL tables - should be defined on the subclasses
_table_prefix = None   # prepended to the object id by _table_name()
_column_table = None   # table whose rows for this template are cleared in delete()
_id_column = None      # column used in WHERE clauses to select this template's rows
_sample_cls = None     # BaseSample subclass instantiated by __getitem__
def _check_id(self, id_, conn_handler=None):
    r"""Check that the MetadataTemplate `id_` exists in the database.

    Parameters
    ----------
    id_ : obj
        The template identifier to look up
    conn_handler : SQLConnectionHandler, optional
        Connection to reuse; a fresh one is created when omitted
    """
    self._check_subclass()
    if conn_handler is None:
        conn_handler = SQLConnectionHandler()
    sql = "SELECT EXISTS(SELECT * FROM qiita.{0} WHERE {1}=%s)".format(
        self._table, self._id_column)
    return conn_handler.execute_fetchone(sql, (id_, ))[0]
@classmethod
def _table_name(cls, obj_id):
    r"""Return the name of the dynamic table for `obj_id`.

    Parameters
    ----------
    obj_id : int
        The id of the metadata template

    Returns
    -------
    str
        The table name, i.e. the subclass prefix followed by the id

    Raises
    ------
    IncompetentQiitaDeveloperError
        If called from the base class directly (no prefix defined)
    """
    prefix = cls._table_prefix
    if not prefix:
        raise IncompetentQiitaDeveloperError(
            "_table_prefix should be defined in the subclasses")
    return "%s%d" % (prefix, obj_id)
@classmethod
def _check_special_columns(cls, md_template, obj):
    r"""Check for special columns based on the type of `obj`.

    When every required *_id column is present, each one is replaced in
    place by its string counterpart using the class id handlers.

    Parameters
    ----------
    md_template : DataFrame
        The metadata template file contents indexed by sample ids
    obj : Study or RawData
        The obj to which the metadata template belongs to. Study in case
        of SampleTemplate and RawData in case of PrepTemplate

    Returns
    -------
    set
        Missing required columns, combined with the subclass-specific ones
    """
    missing = set(cls.translate_cols_dict.values()).difference(md_template)
    if not missing:
        # All required columns present: swap each *_id column for its
        # human-readable string column.
        for id_col, str_col in viewitems(cls.translate_cols_dict):
            id_to_str = cls.id_cols_handlers[id_col]
            md_template[id_col] = pd.Series(
                [id_to_str[v] for v in md_template[str_col]],
                index=md_template.index)
            del md_template[str_col]
    return missing.union(
        cls._check_template_special_columns(md_template, obj))
@classmethod
def delete(cls, id_):
    r"""Drop the metadata template `id_` and its bookkeeping rows.

    Parameters
    ----------
    id_ : obj
        The object identifier

    Raises
    ------
    QiitaDBUnknownIDError
        If no metadata_template with id id_ exists
    """
    if not cls.exists(id_):
        raise QiitaDBUnknownIDError(id_, cls.__name__)
    conn_handler = SQLConnectionHandler()
    # NOTE(review): this base-class method always clears
    # qiita.sample_template_filepath keyed by study_id, even when invoked
    # through other subclasses -- confirm this is intended.
    conn_handler.execute(
        "DELETE FROM qiita.sample_template_filepath WHERE "
        "study_id = %s", (id_, ))
    # Drop the dynamic per-template table itself...
    conn_handler.execute(
        "DROP TABLE qiita.{0}".format(cls._table_name(id_)))
    # ...then remove the template's rows from the bookkeeping tables.
    for table in (cls._table, cls._column_table):
        conn_handler.execute(
            "DELETE FROM qiita.{0} where {1} = %s".format(table,
                                                          cls._id_column),
            (id_,))
@classmethod
def exists(cls, obj_id):
    r"""Check whether a MetadataTemplate already exists for `obj_id`.

    Parameters
    ----------
    obj_id : int
        The id to test if it exists on the database

    Returns
    -------
    bool
        True if already exists. False otherwise.
    """
    cls._check_subclass()
    conn_handler = SQLConnectionHandler()
    # Existence is determined by the presence of the dynamic table.
    return exists_table(cls._table_name(obj_id), conn_handler)
def _get_sample_ids(self, conn_handler):
    r"""Return all the available samples for the metadata template.

    Parameters
    ----------
    conn_handler : SQLConnectionHandler
        The connection handler object connected to the DB

    Returns
    -------
    set of str
        The set of all available sample ids
    """
    rows = conn_handler.execute_fetchall(
        "SELECT sample_id FROM qiita.{0} WHERE "
        "{1}=%s".format(self._table, self._id_column),
        (self._id, ))
    # Each row is a one-element tuple holding the sample id.
    return {row[0] for row in rows}
def __len__(self):
    r"""Return the number of samples in the metadata template.

    Returns
    -------
    int
        The number of samples in the metadata template
    """
    return len(self._get_sample_ids(SQLConnectionHandler()))
def __getitem__(self, key):
    r"""Return the sample object for sample id `key`.

    Parameters
    ----------
    key : str
        The sample id

    Returns
    -------
    Sample
        The sample object for the sample id `key`

    Raises
    ------
    KeyError
        If the sample id `key` is not present in the metadata template

    See Also
    --------
    get
    """
    # Guard clause: fail fast on unknown sample ids.
    if key not in self:
        raise KeyError("Sample id %s does not exists in template %d"
                       % (key, self._id))
    return self._sample_cls(key, self)
def __setitem__(self, key, value):
r"""Sets the metadata values for sample id `key`
Parameters
----------
key : str
The sample id
value : Sample
The sample obj | |
values include: "Default", "Copy", "Secondary", "PointInTimeRestore",
"Restore", "Recovery", "RestoreExternalBackup", "RestoreExternalBackupSecondary",
"RestoreLongTermRetentionBackup", "OnlineSecondary".
:type create_mode: str or ~azure.mgmt.sql.models.CreateMode
:param collation: The collation of the database.
:type collation: str
:param max_size_bytes: The max size of the database expressed in bytes.
:type max_size_bytes: long
:param sample_name: The name of the sample schema to apply when creating this database.
Possible values include: "AdventureWorksLT", "WideWorldImportersStd", "WideWorldImportersFull".
:type sample_name: str or ~azure.mgmt.sql.models.SampleName
:param elastic_pool_id: The resource identifier of the elastic pool containing this database.
:type elastic_pool_id: str
:param source_database_id: The resource identifier of the source database associated with
create operation of this database.
:type source_database_id: str
:ivar status: The status of the database. Possible values include: "Online", "Restoring",
"RecoveryPending", "Recovering", "Suspect", "Offline", "Standby", "Shutdown", "EmergencyMode",
"AutoClosed", "Copying", "Creating", "Inaccessible", "OfflineSecondary", "Pausing", "Paused",
"Resuming", "Scaling", "OfflineChangingDwPerformanceTiers", "OnlineChangingDwPerformanceTiers",
"Disabled".
:vartype status: str or ~azure.mgmt.sql.models.DatabaseStatus
:ivar database_id: The ID of the database.
:vartype database_id: str
:ivar creation_date: The creation date of the database (ISO8601 format).
:vartype creation_date: ~datetime.datetime
:ivar current_service_objective_name: The current service level objective name of the database.
:vartype current_service_objective_name: str
:ivar requested_service_objective_name: The requested service level objective name of the
database.
:vartype requested_service_objective_name: str
:ivar default_secondary_location: The default secondary region for this database.
:vartype default_secondary_location: str
:ivar failover_group_id: Failover Group resource identifier that this database belongs to.
:vartype failover_group_id: str
:param restore_point_in_time: Specifies the point in time (ISO8601 format) of the source
database that will be restored to create the new database.
:type restore_point_in_time: ~datetime.datetime
:param source_database_deletion_date: Specifies the time that the database was deleted.
:type source_database_deletion_date: ~datetime.datetime
:param recovery_services_recovery_point_id: The resource identifier of the recovery point
associated with create operation of this database.
:type recovery_services_recovery_point_id: str
:param long_term_retention_backup_resource_id: The resource identifier of the long term
retention backup associated with create operation of this database.
:type long_term_retention_backup_resource_id: str
:param recoverable_database_id: The resource identifier of the recoverable database associated
with create operation of this database.
:type recoverable_database_id: str
:param restorable_dropped_database_id: The resource identifier of the restorable dropped
database associated with create operation of this database.
:type restorable_dropped_database_id: str
:param catalog_collation: Collation of the metadata catalog. Possible values include:
"DATABASE_DEFAULT", "SQL_Latin1_General_CP1_CI_AS".
:type catalog_collation: str or ~azure.mgmt.sql.models.CatalogCollationType
:param zone_redundant: Whether or not this database is zone redundant, which means the replicas
of this database will be spread across multiple availability zones.
:type zone_redundant: bool
:param license_type: The license type to apply for this database. ``LicenseIncluded`` if you
need a license, or ``BasePrice`` if you have a license and are eligible for the Azure Hybrid
Benefit. Possible values include: "LicenseIncluded", "BasePrice".
:type license_type: str or ~azure.mgmt.sql.models.DatabaseLicenseType
:ivar max_log_size_bytes: The max log size for this database.
:vartype max_log_size_bytes: long
:ivar earliest_restore_date: This records the earliest start date and time that restore is
available for this database (ISO8601 format).
:vartype earliest_restore_date: ~datetime.datetime
:param read_scale: The state of read-only routing. If enabled, connections that have
application intent set to readonly in their connection string may be routed to a readonly
secondary replica in the same region. Possible values include: "Enabled", "Disabled".
:type read_scale: str or ~azure.mgmt.sql.models.DatabaseReadScale
:param high_availability_replica_count: The number of secondary replicas associated with the
database that are used to provide high availability.
:type high_availability_replica_count: int
:param secondary_type: The secondary type of the database if it is a secondary. Valid values
are Geo and Named. Possible values include: "Geo", "Named".
:type secondary_type: str or ~azure.mgmt.sql.models.SecondaryType
:ivar current_sku: The name and tier of the SKU.
:vartype current_sku: ~azure.mgmt.sql.models.Sku
:param auto_pause_delay: Time in minutes after which database is automatically paused. A value
of -1 means that automatic pause is disabled.
:type auto_pause_delay: int
:ivar current_backup_storage_redundancy: The storage account type used to store backups for
this database. Possible values include: "Geo", "Local", "Zone".
:vartype current_backup_storage_redundancy: str or
~azure.mgmt.sql.models.CurrentBackupStorageRedundancy
:param requested_backup_storage_redundancy: The storage account type to be used to store
backups for this database. Possible values include: "Geo", "Local", "Zone".
:type requested_backup_storage_redundancy: str or
~azure.mgmt.sql.models.RequestedBackupStorageRedundancy
:param min_capacity: Minimal capacity that database will always have allocated, if not paused.
:type min_capacity: float
:ivar paused_date: The date when database was paused by user configuration or action (ISO8601
format). Null if the database is ready.
:vartype paused_date: ~datetime.datetime
:ivar resumed_date: The date when database was resumed by user action or database login
(ISO8601 format). Null if the database is paused.
:vartype resumed_date: ~datetime.datetime
:param maintenance_configuration_id: Maintenance configuration id assigned to the database.
This configuration defines the period when the maintenance updates will occur.
:type maintenance_configuration_id: str
:param is_ledger_on: Whether or not this database is a ledger database, which means all tables
in the database are ledger tables. Note: the value of this property cannot be changed after the
database has been created.
:type is_ledger_on: bool
:ivar is_infra_encryption_enabled: Infra encryption is enabled for this database.
:vartype is_infra_encryption_enabled: bool
"""
# Serialization metadata, presumably consumed by the SDK's model
# (de)serializer base class -- confirm against the Model implementation.
# Attributes flagged readonly are server-populated and ignored on requests;
# 'location' is the only attribute required when sending.
_validation = {
    'id': {'readonly': True},
    'name': {'readonly': True},
    'type': {'readonly': True},
    'location': {'required': True},
    'kind': {'readonly': True},
    'managed_by': {'readonly': True},
    'status': {'readonly': True},
    'database_id': {'readonly': True},
    'creation_date': {'readonly': True},
    'current_service_objective_name': {'readonly': True},
    'requested_service_objective_name': {'readonly': True},
    'default_secondary_location': {'readonly': True},
    'failover_group_id': {'readonly': True},
    'max_log_size_bytes': {'readonly': True},
    'earliest_restore_date': {'readonly': True},
    'current_sku': {'readonly': True},
    'current_backup_storage_redundancy': {'readonly': True},
    'paused_date': {'readonly': True},
    'resumed_date': {'readonly': True},
    'is_infra_encryption_enabled': {'readonly': True},
}

# Maps each python attribute to its wire-format key and serialization type.
# Keys prefixed with 'properties.' are nested under the resource's
# "properties" envelope in the REST payload.
_attribute_map = {
    'id': {'key': 'id', 'type': 'str'},
    'name': {'key': 'name', 'type': 'str'},
    'type': {'key': 'type', 'type': 'str'},
    'location': {'key': 'location', 'type': 'str'},
    'tags': {'key': 'tags', 'type': '{str}'},
    'sku': {'key': 'sku', 'type': 'Sku'},
    'kind': {'key': 'kind', 'type': 'str'},
    'managed_by': {'key': 'managedBy', 'type': 'str'},
    'create_mode': {'key': 'properties.createMode', 'type': 'str'},
    'collation': {'key': 'properties.collation', 'type': 'str'},
    'max_size_bytes': {'key': 'properties.maxSizeBytes', 'type': 'long'},
    'sample_name': {'key': 'properties.sampleName', 'type': 'str'},
    'elastic_pool_id': {'key': 'properties.elasticPoolId', 'type': 'str'},
    'source_database_id': {'key': 'properties.sourceDatabaseId', 'type': 'str'},
    'status': {'key': 'properties.status', 'type': 'str'},
    'database_id': {'key': 'properties.databaseId', 'type': 'str'},
    'creation_date': {'key': 'properties.creationDate', 'type': 'iso-8601'},
    'current_service_objective_name': {'key': 'properties.currentServiceObjectiveName', 'type': 'str'},
    'requested_service_objective_name': {'key': 'properties.requestedServiceObjectiveName', 'type': 'str'},
    'default_secondary_location': {'key': 'properties.defaultSecondaryLocation', 'type': 'str'},
    'failover_group_id': {'key': 'properties.failoverGroupId', 'type': 'str'},
    'restore_point_in_time': {'key': 'properties.restorePointInTime', 'type': 'iso-8601'},
    'source_database_deletion_date': {'key': 'properties.sourceDatabaseDeletionDate', 'type': 'iso-8601'},
    'recovery_services_recovery_point_id': {'key': 'properties.recoveryServicesRecoveryPointId', 'type': 'str'},
    'long_term_retention_backup_resource_id': {'key': 'properties.longTermRetentionBackupResourceId', 'type': 'str'},
    'recoverable_database_id': {'key': 'properties.recoverableDatabaseId', 'type': 'str'},
    'restorable_dropped_database_id': {'key': 'properties.restorableDroppedDatabaseId', 'type': 'str'},
    'catalog_collation': {'key': 'properties.catalogCollation', 'type': 'str'},
    'zone_redundant': {'key': 'properties.zoneRedundant', 'type': 'bool'},
    'license_type': {'key': 'properties.licenseType', 'type': 'str'},
    'max_log_size_bytes': {'key': 'properties.maxLogSizeBytes', 'type': 'long'},
    'earliest_restore_date': {'key': 'properties.earliestRestoreDate', 'type': 'iso-8601'},
    'read_scale': {'key': 'properties.readScale', 'type': 'str'},
    'high_availability_replica_count': {'key': 'properties.highAvailabilityReplicaCount', 'type': 'int'},
    'secondary_type': {'key': 'properties.secondaryType', 'type': 'str'},
    'current_sku': {'key': 'properties.currentSku', 'type': 'Sku'},
    'auto_pause_delay': {'key': 'properties.autoPauseDelay', 'type': 'int'},
    'current_backup_storage_redundancy': {'key': 'properties.currentBackupStorageRedundancy', 'type': 'str'},
    'requested_backup_storage_redundancy': {'key': 'properties.requestedBackupStorageRedundancy', 'type': 'str'},
    'min_capacity': {'key': 'properties.minCapacity', 'type': 'float'},
    'paused_date': {'key': 'properties.pausedDate', 'type': 'iso-8601'},
    'resumed_date': {'key': 'properties.resumedDate', 'type': 'iso-8601'},
    'maintenance_configuration_id': {'key': 'properties.maintenanceConfigurationId', 'type': 'str'},
    'is_ledger_on': {'key': 'properties.isLedgerOn', 'type': 'bool'},
    'is_infra_encryption_enabled': {'key': 'properties.isInfraEncryptionEnabled', 'type': 'bool'},
}
def __init__(
    self,
    **kwargs
):
    """Initialize the Database model.

    Writable properties are taken from ``kwargs`` (defaulting to None).
    Properties marked readonly in ``_validation`` are always initialized
    to None here; they are only meaningful on server responses.
    """
    super(Database, self).__init__(**kwargs)
    self.sku = kwargs.get('sku', None)
    # Readonly, server-populated resource metadata.
    self.kind = None
    self.managed_by = None
    # Client-settable creation / configuration options.
    self.create_mode = kwargs.get('create_mode', None)
    self.collation = kwargs.get('collation', None)
    self.max_size_bytes = kwargs.get('max_size_bytes', None)
    self.sample_name = kwargs.get('sample_name', None)
    self.elastic_pool_id = kwargs.get('elastic_pool_id', None)
    self.source_database_id = kwargs.get('source_database_id', None)
    # Readonly, server-populated state.
    self.status = None
    self.database_id = None
    self.creation_date = None
    self.current_service_objective_name = None
    self.requested_service_objective_name = None
    self.default_secondary_location = None
    self.failover_group_id = None
    self.restore_point_in_time = kwargs.get('restore_point_in_time', None)
    self.source_database_deletion_date = kwargs.get('source_database_deletion_date', None)
    self.recovery_services_recovery_point_id = kwargs.get('recovery_services_recovery_point_id', None)
    self.long_term_retention_backup_resource_id = kwargs.get('long_term_retention_backup_resource_id', None)
    self.recoverable_database_id = kwargs.get('recoverable_database_id', None)
    self.restorable_dropped_database_id = kwargs.get('restorable_dropped_database_id', None)
    self.catalog_collation = kwargs.get('catalog_collation', None)
    self.zone_redundant = kwargs.get('zone_redundant', None)
    self.license_type = kwargs.get('license_type', None)
    self.max_log_size_bytes = None          # readonly
    self.earliest_restore_date = None       # readonly
    self.read_scale = kwargs.get('read_scale', None)
    self.high_availability_replica_count = kwargs.get('high_availability_replica_count', None)
    self.secondary_type = kwargs.get('secondary_type', None)
    self.current_sku = None                 # readonly
    self.auto_pause_delay = kwargs.get('auto_pause_delay', None)
    self.current_backup_storage_redundancy = None   # readonly
    self.requested_backup_storage_redundancy = kwargs.get('requested_backup_storage_redundancy', None)
    self.min_capacity = kwargs.get('min_capacity', None)
    self.paused_date = None                 # readonly
    self.resumed_date = None                # readonly
    self.maintenance_configuration_id = kwargs.get('maintenance_configuration_id', None)
    self.is_ledger_on = kwargs.get('is_ledger_on', None)
    self.is_infra_encryption_enabled = None  # readonly
class DatabaseAutomaticTuning(ProxyResource):
"""Database-level Automatic Tuning.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param desired_state: Automatic tuning desired state. Possible values include: "Inherit",
"Custom", "Auto", "Unspecified".
:type desired_state: str or ~azure.mgmt.sql.models.AutomaticTuningMode
:ivar actual_state: Automatic tuning actual state. Possible values include: "Inherit",
"Custom", | |
import numpy as np
import matplotlib.pyplot as plt
from qibo.models import Circuit
from qibo import gates
import aux_functions as aux
def rw_circuit(qubits, parameters, X=True):
    """Yield the gates of the amplitude-distributor part of the option-pricing algorithm.

    Args:
        qubits (int): number of qubits used for the unary basis.
        parameters (list): angles fed to the fSim gates that distribute the amplitude.
        X (bool): whether the initial X gate is applied.

    Yields:
        qibo gates implementing the amplitude-distributor circuit.
    """
    if qubits % 2 == 0:
        upper = qubits // 2    # first qubit of the upper half
        lower = upper - 1      # last qubit of the lower half
        if X:
            yield gates.X(upper)
        yield gates.fSim(upper, lower, parameters[lower] / 2, 0)
        # Spread the amplitude symmetrically outwards from the two centres.
        for step in range(lower):
            yield gates.fSim(lower - step, lower - step - 1,
                             parameters[lower - step - 1] / 2, 0)
            yield gates.fSim(upper + step, upper + step + 1,
                             parameters[upper + step] / 2, 0)
    else:
        centre = (qubits - 1) // 2
        if X:
            yield gates.X(centre)
        # Spread the amplitude symmetrically outwards from the centre qubit.
        for step in range(centre):
            yield gates.fSim(centre - step, centre - step - 1,
                             parameters[centre - step - 1] / 2, 0)
            yield gates.fSim(centre + step, centre + step + 1,
                             parameters[centre + step] / 2, 0)
def rw_circuit_inv(qubits, parameters, X=True):
    """Yield the amplitude-distributor gates in reverse, with negated angles.

    Used in the amplitude-estimation part of the algorithm to undo
    :func:`rw_circuit`.

    Args:
        qubits (int): number of qubits used for the unary basis.
        parameters (list): angles fed to the fSim gates that distribute the amplitude.
        X (bool): whether the final X gate is applied.

    Yields:
        qibo gates for the inverse amplitude-distributor circuit.
    """
    if qubits % 2 == 0:
        upper = qubits // 2    # first qubit of the upper half
        lower = upper - 1      # last qubit of the lower half
        # Undo the outward spreading from the outermost pair inwards.
        for step in range(lower - 1, -1, -1):
            yield gates.fSim(lower - step, lower - step - 1,
                             -parameters[lower - step - 1] / 2, 0)
            yield gates.fSim(upper + step, upper + step + 1,
                             -parameters[upper + step] / 2, 0)
        yield gates.fSim(upper, lower, -parameters[lower] / 2, 0)
        if X:
            yield gates.X(upper)
    else:
        centre = (qubits - 1) // 2
        # Undo the outward spreading from the outermost pair inwards.
        for step in range(centre - 1, -1, -1):
            yield gates.fSim(centre + step, centre + step + 1,
                             -parameters[centre + step] / 2, 0)
            yield gates.fSim(centre - step, centre - step - 1,
                             -parameters[centre - step - 1] / 2, 0)
        if X:
            yield gates.X(centre)
def create_qc(qubits):
    """Create the quantum circuit and the registers the algorithm runs on.

    Args:
        qubits (int): number of qubits used for the unary basis.

    Returns:
        q (list): quantum register encoding the asset's price in the unary basis.
        ancilla (int): qubit that encodes the payoff of the options.
        circuit (Circuit): quantum circuit with enough space for the algorithm.
    """
    price_register = list(range(qubits))
    # The payoff ancilla is the extra qubit appended after the unary register.
    payoff_qubit = qubits
    circuit = Circuit(qubits + 1)
    return price_register, payoff_qubit, circuit
def rw_parameters(qubits, pdf):
    """Compute the fSim angles that load a target pdf into the unary basis.

    Args:
        qubits (int): number of qubits used for the unary basis.
        pdf (list): probability distribution function to be reproduced.

    Returns:
        parameters (list): angles for the fSim gates of the amplitude distributor.
    """
    # Integer division gives the centre index for both parities:
    # qubits // 2 == (qubits - 1) // 2 for odd qubit counts.
    mid = qubits // 2
    remaining = 1  # probability mass still unassigned after previous angles
    angles = []
    for i in range(mid - 1):
        theta = 2 * np.arctan(np.sqrt(pdf[i] / (pdf[i + 1] * remaining)))
        angles.append(theta)
        remaining = np.cos(theta / 2) ** 2  # needed to solve the next angle
    theta = 2 * np.arcsin(np.sqrt(pdf[mid - 1] / remaining))
    angles.append(theta)
    remaining = np.cos(theta / 2) ** 2
    for i in range(mid, qubits - 1):
        theta = 2 * np.arccos(np.sqrt(pdf[i] / remaining))
        angles.append(theta)
        remaining *= np.sin(theta / 2) ** 2
    return angles
def measure_probability(q):
    """Yield the measurement gate used to validate the amplitude distributor.

    Args:
        q (list): quantum register encoding the asset's price in the unary basis.

    Yields:
        the measurement gate for the probability-distribution check.
    """
    # The ancilla qubit is deliberately left unmeasured.
    yield gates.M(*q, register_name='prob')
def extract_probability(qubits, counts, samples):
    """Normalize the measured counts of the unary-basis states.

    Args:
        qubits (int): number of qubits used for the unary basis.
        counts (dict): times each output bitstring has been measured.
        samples (int): number of samples, used for normalization.

    Returns:
        prob (list): normalized probabilities of the unary states, ordered
        from the highest-weight bitstring ('100...0') down to '0...01'.
    """
    bit_format = '{0:0%sb}' % str(qubits)  # zero-padded binary of width `qubits`
    # Unary states are the powers of two; unmeasured states count as zero.
    return [counts.get(bit_format.format(2 ** i), 0) / samples
            for i in reversed(range(qubits))]
def get_pdf(qubits, S0, sig, r, T):
    """Discretize the target lognormal price distribution onto the unary register.

    Args:
        qubits (int): number of qubits used for the unary basis.
        S0 (real): initial asset price.
        sig (real): market volatility.
        r (real): market rate.
        T (real): maturity time.

    Returns:
        values (np.array): price values associated to the unary basis.
        pdf (np.array): probability distribution for the asset's price evolution.
    """
    mu = (r - 0.5 * sig ** 2) * T + np.log(S0)
    mean = np.exp(mu + 0.5 * T * sig ** 2)
    variance = (np.exp(T * sig ** 2) - 1) * np.exp(2 * mu + T * sig ** 2)
    std = np.sqrt(variance)
    # Price axis spans mean +/- 3 standard deviations, clipped at zero below.
    low = max(mean - 3 * std, 0)
    high = mean + 3 * std
    values = np.linspace(low, high, qubits)
    pdf = aux.log_normal(values, mu, sig * np.sqrt(T))
    return values, pdf
def load_quantum_sim(qu, S0, sig, r, T):
    """Build the unary-basis circuit that loads the target lognormal distribution.

    Args:
        qu (int): number of qubits used for the unary basis.
        S0 (real): initial asset price.
        sig (real): market volatility.
        r (real): market rate.
        T (real): maturity time.

    Returns:
        circuit (Circuit): quantum circuit with the target probability encoded in the unary basis
        values (np.array): price values associated to the unary basis.
        pdf (np.array): probability distribution for the asset's price evolution.
    """
    (values, pdf) = get_pdf(qu, S0, sig, r, T)
    q, ancilla, circuit = create_qc(qu)
    lognormal_parameters = rw_parameters(qu, pdf) # Solve for the parameters needed to create the target lognormal distribution
    circuit.add(rw_circuit(qu, lognormal_parameters)) # Build the probability loading circuit with the adjusted parameters
    circuit.add(measure_probability(q)) #Circuit to test the precision of the probability loading algorithm
    return circuit, (values, pdf)
def run_quantum_sim(qubits, circuit, shots):
    """Execute the quantum circuit and extract the probability of measuring each state of the unary basis.

    Args:
        qubits (int): number of qubits used for the unary basis.
        circuit (Circuit): quantum circuit with the target probability encoded in the unary basis.
        shots (int): number of samples to extract from the circuit.

    Returns:
        prob_sim (list): normalized probability of each possible output in the unary basis.
    """
    result = circuit(nshots=shots)  # calling the circuit samples it `shots` times
    frequencies = result.frequencies(binary=True, registers=False)
    prob_sim = extract_probability(qubits, frequencies, shots)
    return prob_sim
def payoff_circuit(qubits, ancilla, K, S):
    """Quantum circuit that encodes the expected payoff into the probability of measuring an ancilla qubit.

    Args:
        qubits (int): number of qubits used for the unary basis.
        ancilla (int): qubit that encodes the payoff of the options.
        K (real): strike price.
        S (np.array): equivalent asset price for each element of the unary basis.

    Returns:
        generator that yields the gates required to encode the payoff into an ancillary qubit.
    """
    for i in range(qubits): #Determine the first qubit's price that
        qK = i              #surpasses the strike price
        if K<S[i]:
            break
    # NOTE(review): if no price exceeds K, qK stays at qubits-1 and the loop below
    # still rotates for that state (0/0 when S[qubits-1] == K) -- confirm callers
    # always guarantee K < S[qubits-1].
    for i in range(qK, qubits): #Control-RY rotations controled by states
        angle = 2 * np.arcsin(np.sqrt((S[i]-K)/(S[qubits-1]-K))) #with higher value than the strike
        yield gates.RY(ancilla, angle).controlled_by(i) #targeting the ancilla qubit
def payoff_circuit_inv(qubits, ancilla, K, S):
    """Quantum circuit that encodes the expected payoff into the probability of measuring an ancilla qubit in reverse.

    Circuit used in the amplitude estimation part of the algorithm; it applies the
    opposite rotation angles of payoff_circuit.

    Args:
        qubits (int): number of qubits used for the unary basis.
        ancilla (int): qubit that encodes the payoff of the options.
        K (real): strike price.
        S (np.array): equivalent asset price for each element of the unary basis.

    Returns:
        generator that yields the gates required for the inverse of the circuit used to encode
        the payoff into an ancillary qubit.
    """
    for i in range(qubits): #Determine the first qubit's price that
        qK = i              #surpasses the strike price
        if K<S[i]:
            break
    for i in range(qK, qubits): #Control-RY rotations controled by states
        angle = 2 * np.arcsin(np.sqrt((S[i]-K)/(S[qubits-1]-K))) #with higher value than the strike
        yield gates.RY(ancilla, -angle).controlled_by(i) #targeting the ancilla qubit
def measure_payoff(q, ancilla):
"""Measurement gates needed to measure the expected payoff and perform post-selection
Args:
q (list): quantum register encoding the asset's price in the unary bases.
ancilla (int): qubit that encodes | |
"""
jobs.py - base notification routines
Author
<NAME> <<EMAIL>>
License
Copyright (c) 2010-2012 Massachusetts Institute of Technology.
MIT License (cf. MIT-LICENSE.txt or http://www.opensource.org/licenses/mit-license.php)
"""
import sys,os
import datetime
if "." not in sys.path:
sys.path.append(".")
if "DJANGO_SETTINGS_MODULE" not in os.environ or __name__=="__main__":
os.environ['DJANGO_SETTINGS_MODULE'] = 'nbsite.settings'
from django.conf import settings
import base.utils as utils, base.models as M
from django.template.loader import render_to_string
from django.core.mail import EmailMessage
from django.db.models import Max
from django.db.models.deletion import Collector
from django.db.utils import IntegrityError
from django.db import transaction
# Maps Comment.type codes to the human-readable audience label used in emails.
VISIBILITY = {1: "Myself", 2: "Staff", 3: "Class"}
# Objects whose save() hit an IntegrityError; retried by do_pending_inserts().
pending_inserts = []
def extract_obj(o, from_class, cut_at):
    """Recursively visit every object related to `o`, one class/id map per call.

    NOTE(review): `from_class` and `cut_at` are accepted but never read below --
    presumably hooks to bound the recursion; confirm intent.
    NOTE(review): `extracted` is local to each call, so already-visited objects
    are only deduplicated within one recursion level.
    """
    #inspired from from http://stackoverflow.com/a/2315053/768104
    extracted = {}
    print "pulling objects related to %s" % (o,)
    # Reverse-relation accessor names for every model pointing at `o`.
    links = [rel.get_accessor_name() for rel in o._meta.get_all_related_objects()]
    for link in links:
        rel_objects = getattr(o, link).all()
        for ro in rel_objects:
            classname = ro.__class__.__name__
            if classname not in extracted:
                extracted[classname]={}
            if ro.id not in extracted[classname]:
                extracted[classname][ro.id]=1
                extract_obj(ro, classname, cut_at)
from django.db.models.fields.related import ForeignKey
def duplicate(objs, using_src, using_dest, special_handlers):
    """Copy `objs` plus their graph of related objects from one database to another.

    Args:
        objs: model instances to duplicate.
        using_src: source database alias.
        using_dest: destination database alias.
        special_handlers: NOTE(review): accepted but never used in this body --
            confirm whether per-model handlers (cf. do_extract) were meant to run here.
    """
    #adapted from http://stackoverflow.com/a/6064096/768104
    collector = Collector(using_src)
    collector.collect(objs)
    collector.sort()
    related_models = collector.data.keys()
    # Insert dependency leaves first so FK targets exist before their referrers.
    duplicate_order = reversed(related_models)
    extracted = {}  # model/class -> {id: True} of rows already written to dest
    for model in duplicate_order:
        # Find all FKs on model that point to a related_model.
        fks = []
        for f in model._meta.fields:
            if isinstance(f, ForeignKey) and f.rel.to not in related_models:
                fks.append(f)
        # Replace each `sub_obj` with a duplicate.
        if model not in collector.data:
            continue
        sub_objects = collector.data[model]
        for obj in sub_objects:
            for fk in fks:
                rel_obj = getattr(obj, fk.name)
                rel_cls = rel_obj.__class__
                if rel_cls not in extracted:
                    extracted[rel_cls]={}
                if rel_obj is not None and rel_obj.id not in extracted[rel_cls]:
                    extracted[rel_cls][rel_obj.id]=True
                    rel_obj.save(using=using_dest)
                    #print "-> saved related object %s" % (rel_obj,)
            #now ready to insert obj:
            if model not in extracted:
                extracted[model]={}
            if obj is not None and obj.id not in extracted[model]:
                extracted[model][obj.id]=True
                try:
                    obj.save(using=using_dest)
                except IntegrityError as e:
                    # FK target not written yet; queue for a later retry pass.
                    pending_inserts.append(obj)
        print "%s done TOTAL objects written: %s " % (model.__name__, sum([len(extracted[i]) for i in extracted]))
    do_pending_inserts(using_dest)
def do_pending_inserts(using):
    """Retry saving objects that previously failed with an IntegrityError.

    Objects that fail again are kept in the module-level `pending_inserts`
    list so that a later pass (once more related rows exist) can retry them.

    Args:
        using: database alias to save into.
    """
    global pending_inserts
    new_pending = []
    for o in pending_inserts:
        try:
            o.save(using=using)
        except IntegrityError as e:
            new_pending.append(o)
    # BUGFIX: `new_pending` was previously discarded, leaving `pending_inserts`
    # stale (successes retried forever, failures never re-queued cleanly).
    # The `global` declaration above shows reassignment was intended.
    pending_inserts = new_pending
def do_extract(t_args):
    """Copy a hand-picked set of objects (and their related graphs) from 'default' to 'sel'."""
    objs = [(M.Ensemble, 237), ]
    objs_src = [o[0].objects.using("default").get(pk=o[1]) for o in objs]
    def insert_parent_comments(o, using_dest):
        # Walk up the reply chain, then save ancestors root-first so that
        # each comment's parent row exists before the comment itself.
        ancestors = []
        c = o.parent
        while c is not None:
            ancestors.append(c)
            c = c.parent
        for c2 in reversed(ancestors):
            c2.save(using=using_dest)
        print "Special Comment case: inserted %s parent comments" % (len(ancestors),)
    duplicate(objs_src, "default", "sel", {M.Comment: insert_parent_comments})
    objs_dest = [o[0].objects.using("sel").get(pk=o[1]) for o in objs]
def do_dumpensemble(t_args):
    """Serialize a fixed set of ensembles and all their related rows to ensembles.json."""
    ensemble_ids = (3756, 3840) #Add ensembles here.
    from django.core import serializers
    serializer = serializers.get_serializer("json")()
    f = open("ensembles.json", "w")
    ensembles = M.Ensemble.objects.filter(id__in=ensemble_ids).distinct()
    serializer.serialize(ensembles, indent=1, stream=f)
    # Users are dumped with a field whitelist; every other model in full.
    serializer.serialize(
        M.User.objects.filter(membership__ensemble__in=ensembles).distinct(),
        indent=1, stream=f,
        fields=("id", "firstname", "lastname", "email", "guest", "valid"))
    related_querysets = [
        M.Folder.objects.filter(ownership__ensemble__in=ensembles).distinct(),
        M.Section.objects.filter(membership__ensemble__in=ensembles).distinct(),
        M.Membership.objects.filter(ensemble__in=ensembles).distinct(),
        M.Source.objects.filter(ownership__ensemble__in=ensembles).distinct(),
        M.HTML5Info.objects.filter(source__ownership__ensemble__in=ensembles).distinct(),
        M.Ownership.objects.filter(ensemble__in=ensembles).distinct(),
        M.Location.objects.filter(ensemble__in=ensembles).distinct(),
        M.HTML5Location.objects.filter(location__ensemble__in=ensembles).distinct(),
        M.Comment.objects.filter(location__ensemble__in=ensembles).distinct(),
        M.ThreadMark.objects.filter(comment__location__ensemble__in=ensembles).distinct(),
        M.ReplyRating.objects.filter(comment__location__ensemble__in=ensembles).distinct(),
    ]
    for qs in related_querysets:
        serializer.serialize(qs, indent=1, stream=f)
    f.close()
def do_watchdog(t_args):
    """Run all watchdog checks, bracketing their output with timestamps."""
    when = datetime.datetime.now()
    print """
---------------------- WATCHDOG NOTIFICATIONS FOR %s -----------------""" % (when, )
    do_watchdog_longpdfprocess()
    do_watchdog_notstartedpdfprocess()
    print "--------------- END OF WATCHDOG NOTIFICATIONS FOR %s -----------------" % (when, )
def do_immediate(t_args):
    """Send all 'immediate' notification emails since the last recorded run."""
    when = datetime.datetime.now()
    print """
---------------------- IMMEDIATE NOTIFICATIONS FOR %s -----------------""" % (when, )
    do_auth_immediate()
    do_reply_immediate()
    ##do_answerplease_immediate()
    ##do_unclear_immediate()
    do_all_immediate()
    print "--------------- END OF IMMEDIATE NOTIFICATIONS FOR %s -----------------" % (when, )
def do_digest(t_args):
    """Send digest notification emails.

    NOTE(review): every digest routine is currently commented out, so this only
    prints the banners -- confirm whether digests are intentionally disabled.
    """
    when = datetime.datetime.now()
    print """
---------------------- DIGEST NOTIFICATIONS FOR %s -----------------""" % (when, )
    #do_auth_digest()
    #do_reply_digest()
    ##do_answerplease_digest()
    ##do_unclear_digest()
    print "--------------- END OF DIGEST NOTIFICATIONS FOR %s -----------------" % (when, )
def do_auth_immediate():
    """Email each author a confirmation for comments posted since the last run."""
    latestCtime = M.Comment.objects.all().aggregate(Max("ctime"))["ctime__max"]
    latestNotif = M.Notification.objects.get(type="auth_immediate")
    # Per-user opt-in value with fallback to the site default setting
    # 'email_confirmation_author'; resolved in SQL so one query covers both.
    setting_qry = "select coalesce(value, (select value from base_defaultsetting where name='email_confirmation_author')) from base_user u left join base_usersetting us on us.user_id=u.id and us.setting_id=(select id from base_defaultsetting where name='email_confirmation_author') where u.id=base_comment.author_id"
    comments = M.Comment.objects.extra(select={"setting_value": setting_qry}).filter(ctime__gt=latestNotif.atime)
    V={"reply_to": settings.SMTP_REPLY_TO, "protocol": settings.PROTOCOL, "hostname": settings.HOSTNAME }
    for c in (o for o in comments if o.setting_value==2): #django doesn't let us filter by extra parameters yet
        msg = render_to_string("email/msg_auth_immediate",{"V":V, "c": c, "visibility": VISIBILITY[c.type]})
        email = EmailMessage("You've posted a new note on NB...",
                             msg,
                             settings.EMAIL_FROM,
                             (c.author.email, ),
                             (settings.EMAIL_BCC, ))
        email.send()
        try:
            print msg
        except UnicodeEncodeError:
            print "not displaying msg b/c of unicode issues"
    # Advance the high-water mark so the next run only sees newer comments.
    latestNotif.atime = latestCtime
    latestNotif.save()
def do_all_immediate():
    """Email ensemble admins about every new non-private comment in their group."""
    #send email to for all new msg in group where I'm an admin
    latestCtime = M.Comment.objects.all().aggregate(Max("ctime"))["ctime__max"]
    latestNotif = M.Notification.objects.get(type="all_immediate")
    # type__gt=1 skips "Myself"-visibility comments (cf. VISIBILITY).
    comments = M.Comment.objects.filter(ctime__gt=latestNotif.atime, type__gt=1)
    V={"reply_to": settings.SMTP_REPLY_TO, "protocol": settings.PROTOCOL, "hostname": settings.HOSTNAME }
    # Per-member opt-in with fallback to default setting 'email_confirmation_all'.
    setting_qry = "select coalesce(value, (select value from base_defaultsetting where name='email_confirmation_all')) from base_user u left join base_usersetting us on us.user_id=u.id and us.setting_id=(select id from base_defaultsetting where name='email_confirmation_all') where u.id=base_membership.user_id"
    for c in comments:
        memberships = M.Membership.objects.extra(select={"setting_value": setting_qry}).filter(ensemble=c.location.ensemble, admin=True).exclude(user=c.author) #we don't want to send a notice to a faculty for a comment that he wrote !
        for m in (o for o in memberships if o.setting_value==2): #django doesn't let us filter by extra parameters yet
            msg = render_to_string("email/msg_all_immediate",{"V":V, "c": c, "visibility": VISIBILITY[c.type], "m": m})
            email = EmailMessage("%s %s just wrote a comment on %s" % (c.author.firstname, c.author.lastname, c.location.source.title),
                                 msg,
                                 settings.EMAIL_FROM,
                                 (m.user.email, ),
                                 (settings.EMAIL_BCC, ))
            email.send()
            try:
                print msg
            except UnicodeEncodeError:
                print "not displaying msg b/c of unicode issues"
    latestNotif.atime = latestCtime
    latestNotif.save()
def do_reply_immediate():
    """Email thread participants when a class-visible reply lands in their thread."""
    latestCtime = M.Comment.objects.all().aggregate(Max("ctime"))["ctime__max"]
    latestNotif = M.Notification.objects.get(type="reply_immediate")
    # Per-user opt-in with fallback to default setting 'email_confirmation_reply_author'.
    setting_qry = "select coalesce(value, (select value from base_defaultsetting where name='email_confirmation_reply_author')) from base_user u left join base_usersetting us on us.user_id=u.id and us.setting_id=(select id from base_defaultsetting where name='email_confirmation_reply_author') where u.id=base_comment.author_id"
    # Only class-visible replies to class-visible parents trigger a notice.
    recentComments = M.Comment.objects.filter(ctime__gt=latestNotif.atime, type=3, parent__type=3)
    V={"reply_to": settings.SMTP_REPLY_TO, "protocol": settings.PROTOCOL, "hostname": settings.HOSTNAME }
    #TODO: This is ugly: I'd like to keep this vectorized at the DB level, but I don't know how to do it in django, hence the double forloop.
    for rc in recentComments:
        comments = M.Comment.objects.extra(select={"setting_value": setting_qry}).filter(location=rc.location).exclude(author=rc.author)
        emailed_uids=[] #bookkeeping in order not to email N times someone who posted N posts in a thread !
        for c in (o for o in comments if o.setting_value==2): #django doesn't let us filter by extra parameters yet
            if c.author_id not in emailed_uids:
                emailed_uids.append(c.author_id)
                msg = render_to_string("email/msg_reply_immediate",{"V": V, "c":c, "rc":rc})
                email = EmailMessage("New reply on %s" % (c.location.source.title,),
                                     msg, settings.EMAIL_FROM, (c.author.email, ),(settings.EMAIL_BCC, ))
                email.send()
                try:
                    print msg
                except UnicodeEncodeError:
                    print "not displaying msg b/c of unicode issues"
    latestNotif.atime = latestCtime
    latestNotif.save()
def do_watchdog_longpdfprocess():
    """Warn the site admins when a started PDF job has run for over 10 minutes."""
    minutes_ago = datetime.datetime.now() - datetime.timedelta(0, 10*60) # 10 minutes ago
    objs = M.Processqueue.objects.filter(started__isnull=False, completed__isnull=True, started__lt=minutes_ago)
    if objs.count() > 0:
        # Only the oldest offending job is reported per run.
        o=objs[0]
        V = {"processtime": datetime.datetime.now()-o.started, "o": o, "hostname": settings.HOSTNAME }
        msg = render_to_string("email/msg_watchdog_longpdf",V)
        recipients = [i[1] for i in settings.ADMINS]
        email = EmailMessage("NB Watchdog warning: long pdf process",
                             msg,
                             settings.EMAIL_WATCHDOG,
                             recipients,
                             (settings.EMAIL_BCC, ))
        email.send()
        print msg
def do_watchdog_notstartedpdfprocess():
    """Warn the site admins when queued PDF jobs have not started within 20 minutes."""
    minutes_ago = datetime.datetime.now() - datetime.timedelta(0, 20*60) # 20 minutes ago
    objs = M.Processqueue.objects.filter(started__isnull=True, submitted__lt=minutes_ago)
    #rows = DB.getRows("select p.id_source, s.title, now()-p.submitted from nb2_processqueue p left join source s on s.id=p.id_source where now()-p.submitted>'60 minutes' and p.started is null", ());
    if objs.count() > 0:
        V = {"objs": objs, "hostname": settings.HOSTNAME }
        msg = render_to_string("email/msg_watchdog_notstartedpdf",V)
        recipients = [i[1] for i in settings.ADMINS]
        email = EmailMessage("NB Watchdog warning: some pdf processes haven't started yet",
                             msg,
                             settings.EMAIL_WATCHDOG,
                             recipients,
                             (settings.EMAIL_BCC, ))
        email.send()
        print msg
def do_auth_digest():
latestCtime = DB.getVal("select max(ctime) from nb2_comment", ());
rows = DB.getRows("""
select v.id, v.id_location, v.id_author, v.email, v.id_type, v.title, v.body, v.ctime, e.name, us.value as user_setting, ds.value as default_setting
from nb2_v_comment v left join nb2_user_settings us on id_author=us.id_user and us.name='email_confirmation_author' and us.valid=1, ensemble e, nb2_default_settings ds
where
e.id= v.id_ensemble and
v.ctime > (select atime from nb2_latest_notifications where type='auth_digest')
and ds.name='email_confirmation_author'
and ( us.value = 1 or (us.value is null and ds.value=1))
order by v.ctime""", ())
msg_by_email = {}
for r2 in rows:
i=1
V={"reply_to": settings.SMTP_REPLY_TO, "protocol": settings.PROTOCOL, "hostname": settings.HOSTNAME }
V["id_location"] = r2[i];i+=1
V["id_author"] = r2[i];i+=1
V["email"] = r2[i];i+=1
#V["visibility"];
| |
df = df.dropna(subset=[CN.cdr3b, CN.epitope])
df = df[
(df[CN.cdr3b].map(is_valid_aaseq)) &
(df[CN.epitope].map(is_valid_aaseq))
]
logger.debug('Current df_enc.shape: %s' % str(df.shape))
logger.debug('Loaded Zhang data. Current df_enc.shape: %s' % str(df.shape))
return df
class IEDBTCREpitopeDFLoader(FileTCREpitopeDFLoader):
    """Loads TCR-epitope pairs from an IEDB T-cell receptor table export."""

    def _load_from_file(self, fn_source):
        logger.debug('Loading from %s' % fn_source)
        df = pd.read_csv(fn_source)
        logger.debug('Current df_enc.shape: %s' % str(df.shape))
        # Map the IEDB export columns onto the canonical CN columns.
        df[CN.epitope] = df['Description'].str.strip().str.upper()
        df[CN.cdr3b] = df['Chain 2 CDR3 Curated'].str.strip().str.upper()
        df[CN.epitope_gene] = df['Antigen']
        df[CN.epitope_species] = df['Organism']
        df[CN.mhc] = df['MHC Allele Names']
        df[CN.ref_id] = df['Reference ID'].map(lambda x: 'IEDB:%s' % x)
        # Source-wide constants: IEDB rows are human positives.
        for col, const in ((CN.species, 'human'), (CN.source, 'IEDB'), (CN.label, 1)):
            df[col] = const
        logger.debug('Select valid beta CDR3 and epitope sequences')
        df = df.dropna(subset=[CN.cdr3b, CN.epitope])
        keep = df[CN.cdr3b].map(is_valid_aaseq) & df[CN.epitope].map(is_valid_aaseq)
        df = df[keep]
        logger.debug('Current df_enc.shape: %s' % str(df.shape))
        df.index = df.apply(self._make_index, axis=1)
        df = df.loc[:, CN.values()]
        logger.debug('Loaded IEDB data. Current df_enc.shape: %s' % str(df.shape))
        return df
class NetTCREpitopeDFLoader(FileTCREpitopeDFLoader):
    """Loads the NetTCR-2.0 beta-chain dataset (semicolon-separated CSV)."""

    def _load_from_file(self, fn_source):
        logger.debug('Loading from %s' % fn_source)
        df = pd.read_csv(fn_source, sep=';')
        logger.debug('Current df_enc.shape: %s' % str(df.shape))
        # Variable columns taken from the source file.
        df[CN.epitope] = df['peptide'].str.strip().str.upper()
        df[CN.cdr3b] = df['CDR3'].str.strip().str.upper()
        df[CN.label] = df['binder']
        # Constant / absent metadata for this source.
        df[CN.epitope_gene] = None
        df[CN.epitope_species] = None
        df[CN.mhc] = 'HLA-A*02:01'
        df[CN.species] = 'human'
        df[CN.source] = 'NetTCR'
        df[CN.ref_id] = 'PMID:34508155'
        logger.debug('Select valid beta CDR3 and epitope sequences')
        df = df.dropna(subset=[CN.cdr3b, CN.epitope])
        df = df[df[CN.cdr3b].map(is_valid_aaseq) & df[CN.epitope].map(is_valid_aaseq)]
        logger.debug('Current df_enc.shape: %s' % str(df.shape))
        df.index = df.apply(self._make_index, axis=1)
        df = df.loc[:, CN.values()]
        logger.debug('Loaded NetTCR data. Current df_enc.shape: %s' % str(df.shape))
        return df
class pTMnetTCREpitopeDFLoader(FileTCREpitopeDFLoader):
    """Loads positive TCR-epitope pairs from the pTMnet testing CSV."""

    def _load_from_file(self, fn_source):
        logger.debug('Loading from %s' % fn_source)
        df = pd.read_csv(fn_source)
        logger.debug('Current df.shape: %s' % str(df.shape))
        # Sequence-like columns are normalized to stripped upper case.
        for target, src_col in ((CN.epitope, 'Antigen'), (CN.mhc, 'HLA'), (CN.cdr3b, 'CDR3')):
            df[target] = df[src_col].str.strip().str.upper()
        # Constant / absent metadata for this source.
        df[CN.epitope_gene] = None
        df[CN.epitope_species] = None
        df[CN.species] = None
        df[CN.source] = 'pTMnet'
        df[CN.ref_id] = 'lu2021deep'
        df[CN.label] = 1
        logger.debug('Select valid beta CDR3 and epitope sequences')
        df = df.dropna(subset=[CN.cdr3b, CN.epitope])
        df = df[df[CN.cdr3b].map(is_valid_aaseq) & df[CN.epitope].map(is_valid_aaseq)]
        logger.debug('Current df.shape: %s' % str(df.shape))
        df.index = df.apply(self._make_index, axis=1)
        df = df.loc[:, CN.values()]
        logger.debug('Loaded pTMnet data. Current df.shape: %s' % str(df.shape))
        return df
class ConcatTCREpitopeDFLoader(TCREpitopeDFLoader):
    """Concatenates the frames produced by several child loaders into one."""

    def __init__(self, loaders=None, filters=None, negative_generator=None):
        super().__init__(filters, negative_generator)
        self.loaders = loaders

    def _load(self):
        # Load each child frame and stack them row-wise, preserving loader order.
        return pd.concat([child.load() for child in self.loaders])
class TCREpitopeSentenceEncoder(object):
    """Encodes an (epitope, CDR3beta) pair into a fixed-length token-id sentence.

    Layout: start token + subclass-specific encoding + stop token, padded with
    pad tokens up to ``max_len``. Subclasses implement ``_encode`` and
    ``_is_valid_sentence``.
    """

    def __init__(self, tokenizer=None, max_len=None):
        self.tokenizer = tokenizer  # tokenizer exposing vocab/start/stop/mask and id<->token conversion
        self.max_len = max_len      # fixed sentence length, including start/stop and padding

    def encode(self, epitope, cdr3b):
        """Return the padded token-id sentence for the given pair.

        Raises:
            ValueError: if the encoded sentence exceeds ``max_len`` tokens.
        """
        token_ids = [self.start_token_id] + self._encode(epitope, cdr3b) + [self.stop_token_id]
        n_tokens = len(token_ids)
        if n_tokens > self.max_len:
            raise ValueError('Too long tokens: %s > %s' % (n_tokens, self.max_len))
        n_pads = self.max_len - n_tokens
        if n_pads > 0:
            token_ids = token_ids + [self.pad_token_id] * n_pads
        return token_ids

    def _encode(self, epitope, cdr3b):
        raise NotImplementedError()

    # def decode(self, sentence_ids):
    #     raise NotImplementedError()

    def is_valid_sentence(self, sentence_ids):
        """Check length, start/stop placement, padding, and subclass content rules."""
        if len(sentence_ids) != self.max_len:
            return False
        start_loc = 0
        # BUGFIX: a sentence that exactly fills max_len (n_pads == 0 in encode)
        # contains no pad token, and list.index would raise ValueError here.
        try:
            pad_loc = sentence_ids.index(self.pad_token_id)
        except ValueError:
            pad_loc = len(sentence_ids)
        stop_loc = pad_loc - 1
        if (sentence_ids[start_loc] != self.start_token_id) or (sentence_ids[stop_loc] != self.stop_token_id):
            return False
        pad_ids = sentence_ids[pad_loc:]
        if any([tid != self.pad_token_id for tid in pad_ids]):
            return False
        return self._is_valid_sentence(sentence_ids[start_loc+1:stop_loc])

    def _is_valid_sentence(self, sentence_ids):
        raise NotImplementedError()

    @property
    def pad_token(self):
        return '<pad>'

    @property
    def pad_token_id(self):
        return self.tokenizer.vocab[self.pad_token]

    @property
    def start_token(self):
        return self.tokenizer.start_token

    @property
    def start_token_id(self):
        return self.tokenizer.vocab[self.tokenizer.start_token]

    @property
    def stop_token(self):
        return self.tokenizer.stop_token

    @property
    def stop_token_id(self):
        return self.tokenizer.vocab[self.tokenizer.stop_token]

    @property
    def sep_token(self):
        # NOTE: the separator deliberately reuses the tokenizer's stop token.
        return self.tokenizer.stop_token

    @property
    def sep_token_id(self):
        return self.tokenizer.vocab[self.tokenizer.stop_token]

    @property
    def mask_token(self):
        return self.tokenizer.mask_token

    @property
    def mask_token_id(self):
        return self.tokenizer.vocab[self.tokenizer.mask_token]

    def to_tokens(self, token_ids):
        return self.tokenizer.convert_ids_to_tokens(token_ids)

    def to_token_ids(self, tokens):
        return self.tokenizer.convert_tokens_to_ids(tokens)
class DefaultTCREpitopeSentenceEncoder(TCREpitopeSentenceEncoder):
    """Sentence layout: epitope tokens, a separator token, then CDR3beta tokens."""

    def __init__(self, tokenizer=None, max_len=None):
        super().__init__(tokenizer=tokenizer, max_len=max_len)

    def _encode(self, epitope, cdr3b):
        return self.to_token_ids([*epitope, self.sep_token, *cdr3b])

    def _is_valid_sentence(self, sentence_ids):
        # Drop the separator and validate the concatenated amino-acid sequence.
        sep_loc = sentence_ids.index(self.sep_token_id)
        aa_ids = sentence_ids[:sep_loc] + sentence_ids[sep_loc + 1:]
        return is_valid_aaseq(''.join(self.to_tokens(aa_ids)))
class NoSepTCREpitopeSentenceEncoder(TCREpitopeSentenceEncoder):
    """Sentence layout: epitope tokens immediately followed by CDR3beta tokens."""

    def __init__(self, tokenizer=None, max_len=None):
        super().__init__(tokenizer=tokenizer, max_len=max_len)

    def _encode(self, epitope, cdr3b):
        return self.to_token_ids(list(epitope) + list(cdr3b))

    def _is_valid_sentence(self, sentence_ids):
        # Without a separator, the whole content must be one valid AA sequence.
        return is_valid_aaseq(''.join(self.to_tokens(sentence_ids)))
class TCREpitopeSentenceDataset(Dataset):
    """Dataset of encoded (epitope, CDR3beta) sentences with binding labels.

    Rows come from the loaders in DATA_LOADERS; each row carries an encoded
    token-id sentence (CN_SENTENCE column) and a 0/1 label.
    """

    CN_SENTENCE = 'sentence'  # column holding the encoded token-id list
    # TRAIN_TEST_SUFFIXES = ('.train', '.test')
    _all_data_conf = None  # lazily-loaded cache of ../config/data.json

    def __init__(self, config=None, df_enc=None, encoder=None):
        self.config = config
        self.df_enc = df_enc
        self.encoder = encoder

    def train_test_split(self, test_size=0.2, shuffle=True):
        """Split into train/test datasets, stratified on the label column."""
        train_df, test_df = train_test_split(self.df_enc,
                                             test_size=test_size,
                                             shuffle=shuffle,
                                             stratify=self.df_enc[CN.label].values)
        train_config = copy.deepcopy(self.config)
        test_config = copy.deepcopy(self.config)
        return TCREpitopeSentenceDataset(config=train_config, df_enc=train_df, encoder=self.encoder), \
               TCREpitopeSentenceDataset(config=test_config, df_enc=test_df, encoder=self.encoder)

    def __getitem__(self, index):
        """Return (sentence_ids, label) tensors for the row at `index`."""
        row = self.df_enc.iloc[index, :]
        sentence_ids = row[self.CN_SENTENCE]
        label = row[CN.label]
        return torch.tensor(sentence_ids), torch.tensor(label)

    def __len__(self):
        return self.df_enc.shape[0]

    @property
    def name(self):
        return self.config.get('name', '')

    @property
    def max_len(self):
        return self.encoder.max_len

    @property
    def output_csv(self):
        return self.config.get('output_csv', '')

    @classmethod
    def from_key(cls, data_key=None):
        """Build a dataset from a key in ../config/data.json, caching the encoded CSV."""
        def encode_row(row, encoder):
            # Rows that exceed the encoder's max_len are dropped (None -> dropna below).
            try:
                return encoder.encode(epitope=row[CN.epitope], cdr3b=row[CN.cdr3b])
            except ValueError as e:
                logger.warning(e)
                return None
        config = cls._get_data_conf(data_key)
        config['name'] = data_key
        encoder_config = config['encoder']
        encoder = cls._create_encoder(encoder_config)
        output_csv = config['output_csv'].format(**config)
        df = None
        if not os.path.exists(output_csv) or config['overwrite']:
            df = cls._load_source_df(config)
            df[cls.CN_SENTENCE] = df.apply(lambda row: encode_row(row, encoder), axis=1)
            df = df.dropna(subset=[cls.CN_SENTENCE])
            df.to_csv(output_csv)
            logger.info('%s dataset was saved to %s, df.shape: %s' % (data_key, output_csv, str(df.shape)))
        else:
            df = pd.read_csv(output_csv, index_col=0, converters={cls.CN_SENTENCE: lambda x: eval(x)})
            logger.info('%s dataset was loaded from %s, df.shape: %s' % (data_key, output_csv, str(df.shape)))
        config['output_csv'] = output_csv
        return cls(config=config, df_enc=df, encoder=encoder)

    @classmethod
    def from_items(cls, items, encoder_config):
        """Build a dataset directly from (epitope, cdr3b, label) tuples."""
        rows = []
        encoder = cls._create_encoder(encoder_config)
        for epitope, cdr3b, label in items:
            try:
                sent = encoder.encode(epitope=epitope, cdr3b=cdr3b)
                rows.append([epitope, None, None, None, cdr3b, None, None, None, label, sent])
            except ValueError as e:
                # BUGFIX: was logger.waring -- raised AttributeError whenever an
                # item failed to encode, instead of logging and skipping it.
                logger.warning(e)
        df = pd.DataFrame(rows, columns=CN.values() + [cls.CN_SENTENCE])
        return cls(config={}, df_enc=df, encoder=encoder)

    @classmethod
    def _create_encoder(cls, config):
        """Instantiate the sentence encoder named by config['type']."""
        encoder = None
        encoder_type = config.get('type', 'default')
        if encoder_type == 'default':
            encoder = DefaultTCREpitopeSentenceEncoder(tokenizer=TAPETokenizer(vocab=config['vocab']),
                                                       max_len=config['max_len'])
        elif encoder_type == 'nosep':
            encoder = NoSepTCREpitopeSentenceEncoder(tokenizer=TAPETokenizer(vocab=config['vocab']),
                                                     max_len=config['max_len'])
        else:
            raise ValueError('Unknown encoder type: %s' % encoder_type)
        return encoder

    # @classmethod
    # def load_df(cls, fn):
    #     return pd.read_csv(fn, index_col=0, converters={cls.CN_SENTENCE: lambda x: eval(x)})

    @classmethod
    def _load_source_df(cls, config):
        """Assemble the raw source frame per the data config (loaders, filters, negatives)."""
        logger.debug('Loading source dataset for %s' % config['name'])
        logger.debug('config: %s' % config)
        loaders = [DATA_LOADERS[loader_key] for loader_key in config['loaders']]
        filters = [TCREpitopeDFLoader.NotDuplicateFilter()]
        if config.get('query'):
            filters.append(TCREpitopeDFLoader.QueryFilter(query=config['query']))
        if config.get('n_cdr3b_cutoff'):
            filters.append(TCREpitopeDFLoader.MoreThanCDR3bNumberFilter(cutoff=config['n_cdr3b_cutoff']))
        negative_generator = TCREpitopeDFLoader.DefaultNegativeGenerator() if config['generate_negatives'] else None
        loader = ConcatTCREpitopeDFLoader(loaders=loaders, filters=filters, negative_generator=negative_generator)
        return loader.load()

    @classmethod
    def _get_data_conf(cls, data_key):
        if cls._all_data_conf is None:
            cls._all_data_conf = FileUtils.json_load('../config/data.json')
        conf = cls._all_data_conf[data_key]
        return conf
# Registry of named dataset loaders, keyed by dataset id; paths are relative to
# the repository layout (../data/...). Referenced by data.json configs via
# TCREpitopeSentenceDataset._load_source_df.
DATA_LOADERS = OrderedDict({
    'test': NetTCREpitopeDFLoader('../data/test.csv'),
    'test.train': NetTCREpitopeDFLoader('../data/test.train.csv'),
    'test.eval': NetTCREpitopeDFLoader('../data/test.eval.csv'),
    'dash': DashTCREpitopeDFLoader('../data/Dash/human_mouse_pairseqs_v1_parsed_seqs_probs_mq20_clones.tsv'),
    'vdjdb': VDJDbTCREpitopeDFLoader('../data/VDJdb/vdjdb_20210201.txt'),
    'mcpas': McPASTCREpitopeDFLoader('../data/McPAS/McPAS-TCR_20210521.csv'),
    'shomuradova': ShomuradovaTCREpitopeDFLoader('../data/Shomuradova/sars2_tcr.tsv'),
    'immunecode': ImmuneCODETCREpitopeDFLoader('../data/ImmuneCODE/sars2_YLQPRTFLL_with_neg_nodup.csv'),
    'immunecode002_1': ImmuneCODE2TCREpitopeDFLoader('../data/ImmuneCODE-MIRA-Release002.1/peptide-detail-ci.csv'),
    'zhang': ZhangTCREpitopeDFLoader('../data/Zhang'),
    'iedb_sars2': IEDBTCREpitopeDFLoader('../data/IEDB/tcell_receptor_sars2_20210618.csv'),
    'nettcr_train': NetTCREpitopeDFLoader('../data/NetTCR/train_beta_90.csv'),
    'nettcr_eval': NetTCREpitopeDFLoader('../data/NetTCR/mira_eval_threshold90.csv'),
    'pTMnet': pTMnetTCREpitopeDFLoader('../data/pTMnet/testing_data.csv')
})
#######
# Tests
#######
class TCREpitopeDFLoaderTest(BaseTest):
@classmethod
def setUpClass(cls):
super().setUpClass()
pd.set_option('display.max.rows', 999)
pd.set_option('display.max.columns', 999)
logger.setLevel(logging.DEBUG)
def setUp(self) -> None:
#
# self.fn_dash = '../data/Dash/human_mouse_pairseqs_v1_parsed_seqs_probs_mq20_clones.tsv'
# self.fn_vdjdb = '../data/VDJdb/vdjdb_20210201.txt'
# self.fn_mcpas = '../data/McPAS/McPAS-TCR_20210521.csv'
# self.fn_shomuradova = '../data/Shomuradova/sars2_tcr.tsv'
self.fn_tcr_cntr = '../data/TCRGP/human_tcr_control.csv'
def assert_df_index(self, index, sep='_'):
tokens = index.split(sep)
epitope = tokens[0]
cdr3b = tokens[1]
self.assertTrue(is_valid_aaseq(epitope), 'Invalid epitope seq: %s' % epitope)
self.assertTrue(is_valid_aaseq(cdr3b), 'Invalid cdr3b seq: %s' % cdr3b)
def assert_df(self, df):
self.assertIsNotNone(df)
self.assertTrue(df.shape[0] > 0)
df.index.map(self.assert_df_index)
self.assertTrue(all(df[CN.epitope].map(is_valid_aaseq)))
self.assertTrue(all(df[CN.cdr3b].map(is_valid_aaseq)))
self.assertTrue(all(df[CN.label].map(lambda x: x in [0, 1])))
def print_summary_df(self, df):
print('df_enc.shape: %s' % str(df.shape))
print(df.head())
print(df[CN.epitope].value_counts())
print(df[CN.label].value_counts())
# def test_dash(self):
# # loader = DashTCREpitopeDFLoader(fn_source=self.fn_dash)
# loader = DATA_LOADERS['dash']
#
# df_enc = loader.load()
# self.assert_df(df_enc)
# self.print_summary_df(df_enc)
#
# def test_vdjdb(self):
# # loader = VDJDbTCREpitopeDFLoader(fn_source=self.fn_vdjdb)
# loader = DATA_LOADERS['vdjdb']
# df_enc = loader.load()
# self.assert_df(df_enc)
# self.print_summary_df(df_enc)
#
# def test_mcpas(self):
# # loader = McPASTCREpitopeDFLoader(fn_source=self.fn_mcpas)
# loader = DATA_LOADERS['mcpas']
# df_enc = loader.load()
# self.assert_df(df_enc)
# self.print_summary_df(df_enc)
#
# def test_shomuradova(self):
# # loader = ShomuradovaTCREpitopeDFLoader(fn_source=self.fn_shomuradova)
# loader = DATA_LOADERS['shomuradova']
# df_enc = loader.load()
# self.assert_df(df_enc)
# self.print_summary_df(df_enc)
def test_data_loaders(self):
# keys = ['vdjdb', 'mcpas']
keys = DATA_LOADERS.keys()
for key in keys:
logger.debug('Test loader: %s' % key)
loader = DATA_LOADERS[key]
df = loader.load()
self.assert_df(df)
self.print_summary_df(df)
def test_concat(self):
loaders = DATA_LOADERS.values()
n_rows = 0
for loader in loaders:
df = loader.load()
n_rows += df.shape[0]
loader = ConcatTCREpitopeDFLoader(loaders=loaders)
df = loader.load()
self.assertEqual(n_rows, df.shape[0])
self.assert_df(df)
self.print_summary_df(df)
def test_filter(self):
loader = ConcatTCREpitopeDFLoader(loaders=[DATA_LOADERS['vdjdb']])
df = loader.load()
n_dup = np.count_nonzero(df.index.duplicated())
self.assertTrue(n_dup > 0)
cutoff = 20
tmp = df[CN.epitope].value_counts() # tmp.index: epitope, tmp.value: count
self.assertTrue(any(tmp < cutoff))
loader = ConcatTCREpitopeDFLoader(loaders=[DATA_LOADERS['vdjdb']],
filters=[TCREpitopeDFLoader.NotDuplicateFilter()])
df = loader.load()
n_dup = np.count_nonzero(df.index.duplicated())
self.assertTrue(n_dup == 0)
tmp = df[CN.epitope].value_counts() # tmp.index: epitope, tmp.value: count
self.assertTrue(any(tmp < cutoff))
loader = ConcatTCREpitopeDFLoader(loaders=[DATA_LOADERS['vdjdb']],
filters=[TCREpitopeDFLoader.NotDuplicateFilter(),
TCREpitopeDFLoader.MoreThanCDR3bNumberFilter(cutoff=cutoff)])
df = loader.load()
n_dup = np.count_nonzero(df.index.duplicated())
self.assertTrue(n_dup == 0)
tmp = df[CN.epitope].value_counts() # tmp.index: epitope, tmp.value: count
self.assertTrue(all(tmp >= cutoff))
self.print_summary_df(df)
def test_negative_generator(self):
cutoff = 20
loader = ConcatTCREpitopeDFLoader(loaders=[DATA_LOADERS['vdjdb']],
filters=[TCREpitopeDFLoader.NotDuplicateFilter(),
TCREpitopeDFLoader.MoreThanCDR3bNumberFilter(cutoff=cutoff)],
negative_generator=TCREpitopeDFLoader.DefaultNegativeGenerator(fn_tcr_cntr=self.fn_tcr_cntr))
df = loader.load()
df_pos = df[df[CN.label] == 1]
df_neg = df[df[CN.label] == 0]
self.assertEqual(df_pos.shape[0], df_neg.shape[0])
pos_cdr3b = df_pos[CN.cdr3b].unique()
neg_cdr3b = df_neg[CN.cdr3b].unique()
self.assertTrue(np.intersect1d(pos_cdr3b, neg_cdr3b).shape[0] == 0)
for epitope, subdf in df.groupby([CN.epitope]):
subdf_pos | |
to shift and scale the distribution. Specifically,
this is equivalent to computing :math:`f(y)` where :math:`y=(x-loc)/scale`.
**Inputs:**
* **loc** (`float`):
location parameter
* **scale** (`float`):
scale parameter
The following methods are available for ``Rayleigh``:
* ``cdf``, ``pdf``, ``log_pdf``, ``icdf``, ``rvs``, ``moments``, ``fit``.
"""
    def __init__(self, loc=0, scale=1):
        # Register the parameters with the generic 1D-continuous base class,
        # then bind cdf/pdf/log_pdf/... wrappers around scipy.stats.rayleigh.
        super().__init__(loc=loc, scale=scale, order_params=('loc', 'scale'))
        self._construct_from_scipy(scipy_name=stats.rayleigh)
class TruncNorm(DistributionContinuous1D):
    """
    Truncated normal distribution.

    The standard form (``loc=0``, ``scale=1``) is a standard normal restricted
    to the interval ``[a, b]``; note that ``a`` and ``b`` are expressed on the
    scale of the standard normal, not of the shifted/scaled variable.

    **Inputs:**

    * **a** (`float`):
        shape parameter
    * **b** (`float`):
        shape parameter
    * **loc** (`float`):
        location parameter
    * **scale** (`float`):
        scale parameter

    The following methods are available for ``TruncNorm``:
    * ``cdf``, ``pdf``, ``log_pdf``, ``icdf``, ``rvs``, ``moments``, ``fit``.
    """
    def __init__(self, a, b, loc=0, scale=1.):
        # Register parameters, then bind scipy.stats.truncnorm-backed methods.
        super().__init__(a=a, b=b, loc=loc, scale=scale, order_params=('a', 'b', 'loc', 'scale'))
        self._construct_from_scipy(scipy_name=stats.truncnorm)
class Uniform(DistributionContinuous1D):
    r"""
    Uniform distribution having probability density function

    .. math:: f(x|a, b) = \dfrac{1}{b-a}

    where :math:`a=loc` and :math:`b=loc+scale`

    **Inputs:**

    * **loc** (`float`):
        lower bound
    * **scale** (`float`):
        range

    The following methods are available for ``Uniform``:
    * ``cdf``, ``pdf``, ``log_pdf``, ``icdf``, ``rvs``, ``moments``, ``fit``.
    """
    # NOTE: the docstring is a raw string; the original plain string contained
    # the invalid escape sequence '\d' (SyntaxWarning on Python 3.12+).
    def __init__(self, loc=0., scale=1.):
        # Register parameters, then bind scipy.stats.uniform-backed methods.
        super().__init__(loc=loc, scale=scale, order_params=('loc', 'scale'))
        self._construct_from_scipy(scipy_name=stats.uniform)
########################################################################################################################
# Univariate Discrete Distributions
########################################################################################################################
class DistributionDiscrete1D(Distribution):
    """
    Parent class for univariate discrete distributions.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @staticmethod
    def _check_x_dimension(x):
        """
        Check the dimension of input x - must be an ndarray of shape (npoints,) or (npoints, 1).
        """
        arr = np.atleast_1d(x)
        if arr.ndim > 2 or (arr.ndim == 2 and arr.shape[1] != 1):
            raise ValueError('Wrong dimension in x.')
        return arr.reshape((-1, ))

    def _construct_from_scipy(self, scipy_name=stats.rv_discrete):
        """Attach cdf/pmf/log_pmf/icdf/moments/rvs wrappers around *scipy_name*.

        The wrappers read ``self.params`` at call time, so later parameter
        updates are picked up automatically.
        """
        def _cdf(x):
            return scipy_name.cdf(x=self._check_x_dimension(x), **self.params)

        def _pmf(x):
            return scipy_name.pmf(x=self._check_x_dimension(x), **self.params)

        def _log_pmf(x):
            return scipy_name.logpmf(x=self._check_x_dimension(x), **self.params)

        def _icdf(x):
            return scipy_name.ppf(q=self._check_x_dimension(x), **self.params)

        def _moments(moments2return='mvsk'):
            return scipy_name.stats(moments=moments2return, **self.params)

        def _rvs(nsamples=1, random_state=None):
            samples = scipy_name.rvs(size=nsamples, random_state=random_state, **self.params)
            return samples.reshape((nsamples, 1))

        self.cdf = _cdf
        self.pmf = _pmf
        self.log_pmf = _log_pmf
        self.icdf = _icdf
        self.moments = _moments
        self.rvs = _rvs
class Binomial(DistributionDiscrete1D):
    r"""
    Binomial distribution having probability mass function:

    .. math:: f(x) = {n \choose x} p^x(1-p)^{n-x}

    for :math:`x\in\{0, 1, 2, ..., n\}`.

    In this standard form `(loc=0)`. Use `loc` to shift the distribution. Specifically, this is equivalent to computing
    :math:`f(y)` where :math:`y=x-loc`.

    **Inputs:**

    * **n** (`int`):
        number of trials, integer >= 0
    * **p** (`float`):
        success probability for each trial, real number in [0, 1]

    * **loc** (`float`):
        location parameter

    The following methods are available for ``Binomial``:
    * ``cdf``, ``pmf``, ``log_pmf``, ``icdf``, ``rvs, moments``.
    """
    def __init__(self, n, p, loc=0.):
        # order_params must name the keys actually stored in self.params
        # ('n', 'p', 'loc'); a stale global rename had left the nonexistent
        # key 'number_of_dimensions' here. The docstring was garbled by the
        # same rename and is restored to the standard n/p notation.
        super().__init__(n=n, p=p, loc=loc, order_params=('n', 'p', 'loc'))
        self._construct_from_scipy(scipy_name=stats.binom)
class Poisson(DistributionDiscrete1D):
    r"""
    Poisson distribution having probability mass function:

    .. math:: f(x) = \exp{(-\mu)}\dfrac{\mu^x}{x!}

    for :math:`x\ge 0`.

    In this standard form `(loc=0)`. Use `loc` to shift the distribution. Specifically, this is equivalent to computing
    :math:`f(y)` where :math:`y=x-loc`.

    **Inputs:**

    * **mu** (`float`):
        shape parameter
    * **loc** (`float`):
        location parameter

    The following methods are available for ``Poisson``:
    * ``cdf``, ``pmf``, ``log_pmf``, ``icdf``, ``rvs``, ``moments``.
    """
    # NOTE: raw docstring fixes the invalid escapes (\e, \d, \m) of the
    # original plain string, and the pmf is written consistently in x
    # (the original mixed f(x) with mu^k/k!).
    def __init__(self, mu, loc=0.):
        # Register parameters, then bind scipy.stats.poisson-backed methods.
        super().__init__(mu=mu, loc=loc, order_params=('mu', 'loc'))
        self._construct_from_scipy(scipy_name=stats.poisson)
########################################################################################################################
# Multivariate Continuous Distributions
########################################################################################################################
class DistributionND(Distribution):
    """
    Parent class for multivariate probability distributions.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @staticmethod
    def _check_x_dimension(x, d=None):
        """
        Check the dimension of input x - must be an ndarray of shape (npoints, d).
        """
        arr = np.array(x)
        # x must be 2D, and if a dimension d is requested it must match.
        if arr.ndim != 2 or (d is not None and arr.shape[1] != d):
            raise ValueError('Wrong dimension in x.')
        return arr
class MVNormal(DistributionND):
    r"""
    Multivariate normal distribution having probability density function

    .. math:: f(x) = \dfrac{1}{\sqrt{(2\pi)^k\det\Sigma}}\exp{-\dfrac{1}{2}(x-\mu)^T\Sigma^{-1}(x-\mu)}

    where :math:`\mu` is the mean vector, :math:`\Sigma` is the covariance matrix, and :math:`k` is the dimension of
    `x`.

    **Inputs:**

    * **mean** (`ndarray`):
        mean vector, `ndarray` of shape `(dimension, )`
    * **cov** (`float` or `ndarray`):
        covariance, `float` or `ndarray` of shape `(dimension, )` or `(dimension, dimension)`. Default is 1.

    The following methods are available for ``MVNormal``:
    * ``pdf``, ``log_pdf``, ``rvs``, ``fit``, ``moments``.
    """
    def __init__(self, mean, cov=1.):
        if len(np.array(mean).shape) != 1:
            raise ValueError('Input mean must be a 1D array.')
        if isinstance(cov, (int, float)):
            pass
        else:
            # An array covariance must be (d,) or (d, d) with d = len(mean).
            if not (len(np.array(cov).shape) in [1, 2] and all(sh == len(mean) for sh in np.array(cov).shape)):
                raise ValueError('Input cov must be a float or ndarray of appropriate dimensions.')
        super().__init__(mean=mean, cov=cov, order_params=['mean', 'cov'])

    def pdf(self, x):
        """Evaluate the joint pdf at the rows of `x`; returns a 1D `ndarray`."""
        pdf_val = stats.multivariate_normal.pdf(x=x, **self.params)
        return np.atleast_1d(pdf_val)

    def log_pdf(self, x):
        """Evaluate the joint log-pdf at the rows of `x`; returns a 1D `ndarray`."""
        logpdf_val = stats.multivariate_normal.logpdf(x=x, **self.params)
        return np.atleast_1d(logpdf_val)

    def rvs(self, nsamples=1, random_state=None):
        """Draw `nsamples` iid samples; returns an array of shape (nsamples, dimension)."""
        if not (isinstance(nsamples, int) and nsamples >= 1):
            raise ValueError('Input nsamples must be an integer > 0.')
        return stats.multivariate_normal.rvs(
            size=nsamples, random_state=random_state, **self.params).reshape((nsamples, -1))

    def fit(self, x):
        """Maximum-likelihood estimates of `mean`/`cov` from data `x` of shape
        (npoints, dimension). Parameters already fixed (not None) are kept."""
        mle_mu, mle_cov = self.params['mean'], self.params['cov']
        if mle_mu is None:
            mle_mu = np.mean(x, axis=0)
        if mle_cov is None:
            # Center the data, then form the (dimension, dimension) MLE
            # covariance (X_c^T X_c) / npoints.
            # Bug fix: the original computed tmp_x @ tmp_x.T, which yields an
            # (npoints, npoints) matrix instead of the covariance.
            tmp_x = x - np.tile(np.asarray(mle_mu).reshape(1, -1), [x.shape[0], 1])
            mle_cov = np.matmul(tmp_x.T, tmp_x) / x.shape[0]
        return {'mean': mle_mu, 'cov': mle_cov}

    def moments(self, moments2return='mv'):
        """Return the mean ('m'), covariance ('v'), or both ('mv')."""
        # 'm' is accepted alongside the legacy 'number_of_variables' spelling
        # (left over from a mechanical rename) for backward compatibility.
        if moments2return in ('m', 'number_of_variables'):
            return self.get_params()['mean']
        elif moments2return == 'v':
            return self.get_params()['cov']
        elif moments2return == 'mv':
            return self.get_params()['mean'], self.get_params()['cov']
        else:
            raise ValueError('UQpy: moments2return must be "number_of_variables", "v" or "mv".')
class Multinomial(DistributionND):
    r"""
    Multinomial distribution having probability mass function

    .. math:: f(x) = \dfrac{n!}{x_1!\dots x_k!}p_1^{x_1}\dots p_k^{x_k}

    for :math:`x=\{x_1,\dots,x_k\}` where each :math:`x_i` is a non-negative integer and :math:`\sum_i x_i = n`.

    **Inputs:**

    * **n** (`int`):
        number of trials
    * **p** (`array_like`):
        probability of a trial falling into each category; should sum to 1

    The following methods are available for ``Multinomial``:
    * ``pmf``, ``log_pmf``, ``rvs``, ``moments``.
    """
    def __init__(self, n, p):
        super().__init__(n=n, p=p)

    def pmf(self, x):
        """Evaluate the pmf at the rows of `x`; returns a 1D `ndarray`."""
        pdf_val = stats.multinomial.pmf(x=x, **self.params)
        return np.atleast_1d(pdf_val)

    def log_pmf(self, x):
        """Evaluate the log-pmf at the rows of `x`; returns a 1D `ndarray`."""
        logpdf_val = stats.multinomial.logpmf(x=x, **self.params)
        return np.atleast_1d(logpdf_val)

    def rvs(self, nsamples=1, random_state=None):
        """Draw `nsamples` iid samples; returns an array of shape (nsamples, k)."""
        if not (isinstance(nsamples, int) and nsamples >= 1):
            raise ValueError('Input nsamples must be an integer > 0.')
        return stats.multinomial.rvs(
            size=nsamples, random_state=random_state, **self.params).reshape((nsamples, -1))

    def moments(self, moments2return='mv'):
        """Return the mean vector ('m'), covariance matrix ('v'), or both ('mv')."""
        # Bug fix: self.params stores the trial count under key 'n' (set in
        # __init__), but the original looked up 'number_of_dimensions' here,
        # which raised KeyError for every call. 'm' is accepted alongside the
        # legacy 'number_of_variables' spelling for backward compatibility.
        if moments2return in ('m', 'number_of_variables'):
            mean = self.get_params()['n'] * np.array(self.get_params()['p'])
            return mean
        elif moments2return == 'v':
            n, p = self.get_params()['n'], np.array(self.get_params()['p'])
            d = len(p)
            # Off-diagonal: -n p_i p_j; diagonal: n p_i (1 - p_i).
            cov = - n * np.tile(p[np.newaxis, :], [d, 1]) * np.tile(p[:, np.newaxis], [1, d])
            np.fill_diagonal(cov, n * p * (1. - p))
            return cov
        elif moments2return == 'mv':
            n, p = self.get_params()['n'], np.array(self.get_params()['p'])
            d = len(p)
            cov = - n * np.tile(p[np.newaxis, :], [d, 1]) * np.tile(p[:, np.newaxis], [1, d])
            np.fill_diagonal(cov, n * p * (1. - p))
            mean = n * p
            return mean, cov
        else:
            raise ValueError('UQpy: moments2return must be "number_of_variables", "v" or "mv".')
class JointInd(DistributionND):
"""
Define a joint distribution from its independent marginals. ``JointInd`` is a child class of ``DistributionND``.
**Inputs:**
* **marginals** (`list`):
list of ``DistributionContinuous1D`` or ``DistributionDiscrete1D`` objects that define the marginals.
Such a multivariate distribution possesses the following methods, on condition that all its univariate marginals
also possess them:
* ``pdf``, ``log_pdf``, ``cdf``, ``rvs``, ``fit``, ``moments``.
The parameters of the distribution are only stored as attributes of the marginal objects. However, the
*get_params* and *update_params* method can still be used for the joint. Note that, for this purpose, each parameter
of the joint is assigned a unique string identifier as `key_index` - where `key` is the parameter name and `index`
the index of the marginal (e.g., location parameter of the 2nd marginal is identified as `loc_1`).
"""
def __init__(self, marginals):
super().__init__()
self.order_params = []
for i, m in enumerate(marginals):
self.order_params.extend([key + '_' + str(i) for key in m.order_params])
# Check and save the marginals
if not (isinstance(marginals, list) and all(isinstance(d, (DistributionContinuous1D, DistributionDiscrete1D))
for d in marginals)):
raise ValueError('Input marginals must be a list of Distribution1d objects.')
self.marginals = marginals
# If all marginals have a method, the joint has it to
if all(hasattr(m, 'pdf') or hasattr(m, 'pmf') for m in self.marginals):
def joint_pdf(dist, x):
x = dist._check_x_dimension(x)
# Compute pdf of independent marginals
pdf_val = np.ones((x.shape[0], ))
for ind_m in range(len(self.marginals)):
if hasattr(self.marginals[ind_m], 'pdf'):
pdf_val *= marginals[ind_m].pdf(x[:, ind_m])
else:
pdf_val *= marginals[ind_m].pmf(x[:, ind_m])
return pdf_val
if any(hasattr(m, 'pdf') for m in self.marginals):
self.pdf = MethodType(joint_pdf, self)
else:
self.pmf = MethodType(joint_pdf, self)
if all(hasattr(m, 'log_pdf') or hasattr(m, 'log_pmf') for m in self.marginals):
def joint_log_pdf(dist, x):
x = dist._check_x_dimension(x)
# Compute pdf of independent marginals
pdf_val = np.zeros((x.shape[0],))
for ind_m in range(len(self.marginals)):
if hasattr(self.marginals[ind_m], 'log_pdf'):
pdf_val += marginals[ind_m].log_pdf(x[:, ind_m])
else:
| |
import numpy as np
import matplotlib as mpl
# Fall back to the non-interactive Agg backend when no X display is available
# (e.g. on a headless compute node) so figures can still be rendered to files.
# NOTE(review): relies on `os` being imported earlier in the file.
if os.environ.get('DISPLAY','') == '':
    print('no display found. Using non-interactive Agg backend')
    mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import paths
import counterfactuals.infrastructurefunctions as infr
import counterfactuals.infrastructureequilibrium as ie
# %%
avg_price_elasts = np.array([-4., -2.5, -1.8])
sigmas = np.array([0., 0.2, 0.4, 0.6, 0.8, 0.9])

# %%
# Define functions to load results.
# All result arrays follow the naming scheme "<name>_e<elast_id>_n<nest_id>.npy",
# so one factory replaces two dozen near-identical lambda assignments
# (assigning lambdas to names is also discouraged, PEP 8 / E731).

def _result_loader(name):
    """Return a loader f(elast_id, nest_id) for '<name>_e<elast_id>_n<nest_id>.npy'."""
    return lambda x, y: np.load(f"{paths.arrays_path}{name}_e{x}_n{y}.npy")

# Point estimates
p_stars = _result_loader("p_stars")
R_stars = _result_loader("R_stars")
q_stars = _result_loader("q_stars")
cs_by_type = _result_loader("cs_by_type")
cs = _result_loader("cs")
ps = _result_loader("ps")
ts = _result_loader("ts")
partial_elasts = _result_loader("partial_elasts")
full_elasts = _result_loader("full_elasts")
partial_Pif_partial_bf = _result_loader("partial_Pif_partial_bf")
partial_Pif_partial_b = _result_loader("partial_Pif_partial_b")
partial_CS_partial_b = _result_loader("partial_CS_partial_b")

# Bootstrap standard errors
p_stars_se = _result_loader("p_stars_se")
R_stars_se = _result_loader("R_stars_se")
q_stars_se = _result_loader("q_stars_se")
cs_by_type_se = _result_loader("cs_by_type_se")
cs_se = _result_loader("cs_se")
ps_se = _result_loader("ps_se")
ts_se = _result_loader("ts_se")
partial_elasts_se = _result_loader("partial_elasts_se")
full_elasts_se = _result_loader("full_elasts_se")
partial_Pif_partial_bf_se = _result_loader("partial_Pif_partial_bf_se")
partial_Pif_partial_b_se = _result_loader("partial_Pif_partial_b_se")
partial_CS_partial_b_se = _result_loader("partial_CS_partial_b_se")

# %%
# Define common graph features
num_firms_to_simulate = 6
num_firms_array = np.arange(num_firms_to_simulate, dtype=int) + 1
elast_ids = np.array([1, 2])[::-1]
alpha = 0.6
lw = 3.
# %%
# Plot effect of number of firms
# Figure layout: one row per elasticity setting (elast_ids is reversed above);
# columns: 2,000 MB plan price, 10,000 MB plan price, equilibrium investment,
# download speed. Dashed curves are 95% confidence bands (estimate +/- 1.96 s.e.).
# All results are loaded at nesting-parameter index 3.
fig, axs = plt.subplots(elast_ids.shape[0], 4, figsize=(15,3.5 * elast_ids.shape[0]), sharex=True)
for i, elast_id in enumerate(elast_ids):
    # dlim = 2,000 prices
    axs[i,0].plot(num_firms_array, p_stars(elast_id,3)[:,0], color="black", lw=lw, alpha=alpha)
    axs[i,0].plot(num_firms_array, p_stars(elast_id,3)[:,0] + 1.96 * p_stars_se(elast_id,3)[:,0], color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
    axs[i,0].plot(num_firms_array, p_stars(elast_id,3)[:,0] - 1.96 * p_stars_se(elast_id,3)[:,0], color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
    axs[i,0].set_xlabel("number of firms")
    axs[i,0].set_ylabel("$p_{j}^{*}$ (in \u20ac)")
    # dlim = 10,000 prices
    axs[i,1].plot(num_firms_array, p_stars(elast_id,3)[:,1], color="black", lw=lw, alpha=alpha)
    axs[i,1].plot(num_firms_array, p_stars(elast_id,3)[:,1] + 1.96 * p_stars_se(elast_id,3)[:,1], color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
    axs[i,1].plot(num_firms_array, p_stars(elast_id,3)[:,1] - 1.96 * p_stars_se(elast_id,3)[:,1], color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
    axs[i,1].set_xlabel("number of firms")
    axs[i,1].set_ylabel("$p_{j}^{*}$ (in \u20ac)")
    # investment
    # NOTE(review): label indexes avg_price_elasts with the loop counter i,
    # while the row annotations below use elast_id. No legend is drawn in this
    # figure, so the labels are unused — confirm which index is intended.
    axs[i,2].plot(num_firms_array, R_stars(elast_id,3), color="black", label=f"{-avg_price_elasts[i]}", lw=lw, alpha=alpha)
    axs[i,2].plot(num_firms_array, R_stars(elast_id,3) + 1.96 * R_stars_se(elast_id,3), color="black", label=f"{-avg_price_elasts[i]}", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
    axs[i,2].plot(num_firms_array, R_stars(elast_id,3) - 1.96 * R_stars_se(elast_id,3), color="black", label=f"{-avg_price_elasts[i]}", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
    axs[i,2].set_xlabel("number of firms")
    axs[i,2].set_ylabel("$R_{f}^{*}$ (in km)")
    # download speeds
    axs[i,3].plot(num_firms_array, q_stars(elast_id,3), color="black", lw=lw, alpha=alpha)
    axs[i,3].plot(num_firms_array, q_stars(elast_id,3) + 1.96 * q_stars_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
    axs[i,3].plot(num_firms_array, q_stars(elast_id,3) - 1.96 * q_stars_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
    axs[i,3].set_xlabel("number of firms")
    axs[i,3].set_ylabel("$q_{f}^{*}$ (in Mbps)")
# Set titles
fontsize = 13.5
pad = 14
cols = ["2$\,$000 MB plan prices", "10$\,$000 MB plan prices", "investment", "download speeds"]
for ax, col in zip(axs[0], cols):
    ax.annotate(col, xy=(0.5, 1), xytext=(0, pad),
                xycoords='axes fraction', textcoords='offset points',
                size=fontsize, ha='center', va='baseline', weight="bold")
mathbfE = "$\\mathbf{E}$"
rows = [f"{mathbfE} = {-avg_price_elasts[elast_id]}" for elast_id in elast_ids]
for ax, row in zip(axs[:,0], rows):
    ax.annotate(row, xy=(0, 0.5), xytext=(-ax.yaxis.labelpad - pad, 0),
                xycoords=ax.yaxis.label, textcoords='offset points',
                size=fontsize, ha='right', va='center', weight="bold")
# Set axis limits
# Shared per-column y-limits so the rows are directly comparable across E.
min_y_p = np.min(np.concatenate(tuple([p_stars(elast_id,3) for elast_id in elast_ids]))) - 5.
max_y_p = np.max(np.concatenate(tuple([p_stars(elast_id,3) for elast_id in elast_ids]))) + 3.
min_y_R = np.min(np.concatenate(tuple([R_stars(elast_id,3) for elast_id in elast_ids]))) - 0.1
max_y_R = np.max(np.concatenate(tuple([R_stars(elast_id,3) for elast_id in elast_ids]))) + 0.1
min_y_q = np.min(np.concatenate(tuple([q_stars(elast_id,3) for elast_id in elast_ids]))) - 5.
max_y_q = np.max(np.concatenate(tuple([q_stars(elast_id,3) for elast_id in elast_ids]))) + 5.
for i, elast_id in enumerate(elast_ids):
    for j in range(2): # first two columns
        axs[i,j].set_ylim((min_y_p, max_y_p))
    axs[i,2].set_ylim((min_y_R, max_y_R))
    axs[i,3].set_ylim((min_y_q, max_y_q))
    for j in range(4): # all columns
        axs[i,j].set_xticks(num_firms_array)
plt.tight_layout()
plt.savefig(f"{paths.graphs_path}counterfactual_variables.pdf", bbox_inches = "tight")
# %%
# Plot elasticities
# One row per elasticity setting; columns: 2,000 MB and 10,000 MB plans.
# Each panel compares "partial" and "full" price elasticities by number of
# firms (nesting-parameter index 3).
fig, axs = plt.subplots(elast_ids.shape[0], 2, figsize=(8,3.5 * elast_ids.shape[0]), sharex=True)
for i, elast_id in enumerate(elast_ids):
    # dlim = 2,000 elasticities
    axs[i,0].plot(num_firms_array, partial_elasts(elast_id,3)[:,0], color=plt.rcParams['axes.prop_cycle'].by_key()['color'][0], lw=lw, alpha=alpha, label="partial")
    axs[i,0].plot(num_firms_array, full_elasts(elast_id,3)[:,0], color=plt.rcParams['axes.prop_cycle'].by_key()['color'][1], lw=lw, alpha=alpha, label="full")
    axs[i,0].set_xlabel("number of firms")
    axs[i,0].legend(loc="lower left")
    # dlim = 10,000 elasticities
    axs[i,1].plot(num_firms_array, partial_elasts(elast_id,3)[:,1], color=plt.rcParams['axes.prop_cycle'].by_key()['color'][0], lw=lw, alpha=alpha, label="partial")
    axs[i,1].plot(num_firms_array, full_elasts(elast_id,3)[:,1], color=plt.rcParams['axes.prop_cycle'].by_key()['color'][1], lw=lw, alpha=alpha, label="full")
    axs[i,1].set_xlabel("number of firms")
    axs[i,1].legend(loc="lower left")
# Set titles
fontsize = 13.5
pad = 14
cols = ["2$\,$000 MB plan", "10$\,$000 MB plan"]
for ax, col in zip(axs[0], cols):
    ax.annotate(col, xy=(0.5, 1), xytext=(0, pad),
                xycoords='axes fraction', textcoords='offset points',
                size=fontsize, ha='center', va='baseline', weight="bold")
mathbfE = "$\\mathbf{E}$"
rows = [f"{mathbfE} = {-avg_price_elasts[elast_id]}" for elast_id in elast_ids]
for ax, row in zip(axs[:,0], rows):
    ax.annotate(row, xy=(0, 0.5), xytext=(-ax.yaxis.labelpad - pad, 0),
                xycoords=ax.yaxis.label, textcoords='offset points',
                size=fontsize, ha='right', va='center', weight="bold")
# Set axis limits
# Common y-limits across all panels (both partial and full elasticities).
min_y = np.min(np.concatenate(tuple([full_elasts(elast_id,3) for elast_id in elast_ids] + [partial_elasts(elast_id,3) for elast_id in elast_ids]))) - 0.3
max_y = np.max(np.concatenate(tuple([full_elasts(elast_id,3) for elast_id in elast_ids] + [partial_elasts(elast_id,3) for elast_id in elast_ids]))) + 0.3
for i, elast_id in enumerate(elast_ids):
    for j in range(2): # all columns
        axs[i,j].set_ylim((min_y, max_y))
        axs[i,j].set_xticks(num_firms_array)
plt.tight_layout()
plt.savefig(f"{paths.graphs_path}counterfactual_elasticities.pdf", bbox_inches = "tight")
# %%
# Plot bw derivatives
# Columns: marginal value of own bandwidth to a firm (dPi_f/db_f), of common
# bandwidth to a firm (dPi_f/db), and of common bandwidth to consumers
# (dCS/db). Dashed curves are 95% CIs; units are euro per person in market / MHz.
fig, axs = plt.subplots(elast_ids.shape[0], 3, figsize=(11,3.5 * elast_ids.shape[0]), sharex=True)
for i, elast_id in enumerate(elast_ids):
    # partial_Pif_partial_bf
    axs[i,0].plot(num_firms_array, partial_Pif_partial_bf(elast_id,3), color="black", lw=lw, alpha=alpha)
    axs[i,0].plot(num_firms_array, partial_Pif_partial_bf(elast_id,3) + 1.96 * partial_Pif_partial_bf_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
    axs[i,0].plot(num_firms_array, partial_Pif_partial_bf(elast_id,3) - 1.96 * partial_Pif_partial_bf_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
    axs[i,0].set_xlabel("number of firms")
    axs[i,0].set_ylabel("\u20ac per person in market / MHz")
    # partial_Pif_partial_b
    axs[i,1].plot(num_firms_array, partial_Pif_partial_b(elast_id,3), color="black", lw=lw, alpha=alpha)
    axs[i,1].plot(num_firms_array, partial_Pif_partial_b(elast_id,3) + 1.96 * partial_Pif_partial_b_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
    axs[i,1].plot(num_firms_array, partial_Pif_partial_b(elast_id,3) - 1.96 * partial_Pif_partial_b_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
    axs[i,1].set_xlabel("number of firms")
    axs[i,1].set_ylabel("\u20ac per person in market / MHz")
    # partial_CS_partial_b
    axs[i,2].plot(num_firms_array, partial_CS_partial_b(elast_id,3), color="black", lw=lw, alpha=alpha)
    axs[i,2].plot(num_firms_array, partial_CS_partial_b(elast_id,3) + 1.96 * partial_CS_partial_b_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
    axs[i,2].plot(num_firms_array, partial_CS_partial_b(elast_id,3) - 1.96 * partial_CS_partial_b_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
    axs[i,2].set_xlabel("number of firms")
    axs[i,2].set_ylabel("\u20ac per person in market / MHz")
# Set titles
fontsize = 13.5
pad = 14
cols = ["$\\frac{\\partial \\Pi_{f}}{\\partial b_{f}}$", "$\\frac{\\partial \\Pi_{f}}{\\partial b}$", "$\\frac{\\partial CS}{\\partial b}$"]
for ax, col in zip(axs[0], cols):
    ax.annotate(col, xy=(0.5, 1), xytext=(0, pad),
                xycoords='axes fraction', textcoords='offset points',
                size=fontsize + 3., ha='center', va='baseline', weight="bold")
mathbfE = "$\\mathbf{E}$"
rows = [f"{mathbfE} = {-avg_price_elasts[elast_id]}" for elast_id in elast_ids]
for ax, row in zip(axs[:,0], rows):
    ax.annotate(row, xy=(0, 0.5), xytext=(-ax.yaxis.labelpad - pad, 0),
                xycoords=ax.yaxis.label, textcoords='offset points',
                size=fontsize, ha='right', va='center', weight="bold")
# Set axis limits
# Shared per-column y-limits so the rows are directly comparable across E.
min_y_Pif_bf = np.min(np.concatenate(tuple([partial_Pif_partial_bf(elast_id,3) for elast_id in elast_ids]))) - 0.005
max_y_Pif_bf = np.max(np.concatenate(tuple([partial_Pif_partial_bf(elast_id,3) for elast_id in elast_ids]))) + 0.008
min_y_Pif_b = np.min(np.concatenate(tuple([partial_Pif_partial_b(elast_id,3) for elast_id in elast_ids]))) - 0.002
max_y_Pif_b = np.max(np.concatenate(tuple([partial_Pif_partial_b(elast_id,3) for elast_id in elast_ids]))) + 0.002
min_y_CS_b = np.min(np.concatenate(tuple([partial_CS_partial_b(elast_id,3) for elast_id in elast_ids]))) - 0.02
max_y_CS_b = np.max(np.concatenate(tuple([partial_CS_partial_b(elast_id,3) for elast_id in elast_ids]))) + 0.03
for i, elast_id in enumerate(elast_ids):
    axs[i,0].set_ylim((min_y_Pif_bf, max_y_Pif_bf))
    axs[i,1].set_ylim((min_y_Pif_b, max_y_Pif_b))
    axs[i,2].set_ylim((min_y_CS_b, max_y_CS_b))
    for j in range(3):
        axs[i,j].set_xticks(num_firms_array)
plt.tight_layout()
plt.savefig(f"{paths.graphs_path}counterfactual_bw_deriv.pdf", bbox_inches = "tight")
# %%
# Plot welfare for number of firms
# Columns: consumer, producer, and total surplus by number of firms. Dashed
# curves are 95% CIs; the light vertical dashed line marks the
# surplus-maximizing number of firms for each measure.
fig, axs = plt.subplots(elast_ids.shape[0], 3, figsize=(11,3.5 * elast_ids.shape[0]), sharex=True)
for i, elast_id in enumerate(elast_ids):
    # consumer surplus
    axs[i,0].plot(num_firms_array, cs(elast_id,3), color="black", lw=lw, alpha=alpha)
    axs[i,0].plot(num_firms_array, cs(elast_id,3) + 1.96 * cs_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
    axs[i,0].plot(num_firms_array, cs(elast_id,3) - 1.96 * cs_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
    axs[i,0].axvline(x=num_firms_array[np.argmax(cs(elast_id,3))], color="black", linestyle="--", alpha=0.25)
    axs[i,0].set_xlabel("number of firms")
    axs[i,0].set_ylabel("\u20ac")
    # producer surplus
    axs[i,1].plot(num_firms_array, ps(elast_id,3), color="black", lw=lw, alpha=alpha)
    axs[i,1].plot(num_firms_array, ps(elast_id,3) + 1.96 * ps_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
    axs[i,1].plot(num_firms_array, ps(elast_id,3) - 1.96 * ps_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
    axs[i,1].axvline(x=num_firms_array[np.argmax(ps(elast_id,3))], color="black", linestyle="--", alpha=0.25)
    axs[i,1].set_xlabel("number of firms")
    axs[i,1].set_ylabel("\u20ac")
    # total surplus
    axs[i,2].plot(num_firms_array, ts(elast_id,3), color="black", lw=lw, alpha=alpha)
    axs[i,2].plot(num_firms_array, ts(elast_id,3) + 1.96 * ts_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
    axs[i,2].plot(num_firms_array, ts(elast_id,3) - 1.96 * ts_se(elast_id,3), color="black", lw=0.7 * lw, alpha=0.5 * alpha, ls="--")
    axs[i,2].axvline(x=num_firms_array[np.argmax(ts(elast_id,3))], color="black", linestyle="--", alpha=0.25)
    axs[i,2].set_xlabel("number of firms")
    axs[i,2].set_ylabel("\u20ac")
# Set titles
fontsize = 13.5
pad = 14
cols = ["consumer surplus", "producer surplus", "total surplus"]
for ax, col in zip(axs[0], cols):
    ax.annotate(col, xy=(0.5, 1), xytext=(0, pad),
                xycoords='axes fraction', textcoords='offset points',
                size=fontsize, ha='center', va='baseline', weight="bold")
mathbfE = "$\\mathbf{E}$"
rows = [f"{mathbfE} = {-avg_price_elasts[elast_id]}" for elast_id in elast_ids]
for ax, row in zip(axs[:,0], rows):
    ax.annotate(row, xy=(0, 0.5), xytext=(-ax.yaxis.labelpad - pad, 0),
                xycoords=ax.yaxis.label, textcoords='offset points',
                size=fontsize, ha='right', va='center', weight="bold")
# Set axis limits
# Shared per-column y-limits so the rows are directly comparable across E.
min_y_cs = np.min(np.concatenate(tuple([cs(elast_id,3) for elast_id in elast_ids]))) - 5.
max_y_cs = np.max(np.concatenate(tuple([cs(elast_id,3) for elast_id in elast_ids]))) + 20.
min_y_ps = np.min(np.concatenate(tuple([ps(elast_id,3) for elast_id in elast_ids]))) - 5.
max_y_ps = np.max(np.concatenate(tuple([ps(elast_id,3) for elast_id in elast_ids]))) + 5.
min_y_ts = np.min(np.concatenate(tuple([ts(elast_id,3) for elast_id in elast_ids]))) - 5.
max_y_ts = np.max(np.concatenate(tuple([ts(elast_id,3) for elast_id in elast_ids]))) + 15.
for i, elast_id in enumerate(elast_ids):
    axs[i,0].set_ylim((min_y_cs, max_y_cs))
    axs[i,1].set_ylim((min_y_ps, max_y_ps))
    axs[i,2].set_ylim((min_y_ts, max_y_ts))
    for j in range(3):
        axs[i,j].set_xticks(num_firms_array)
plt.tight_layout()
plt.savefig(f"{paths.graphs_path}counterfactual_welfare.pdf", bbox_inches = "tight")
# %%
# Plot consumer surplus by type for number of firms
fig, axs = plt.subplots(elast_ids.shape[0], 5, figsize=(15,2.5 * elast_ids.shape[0]), sharex=True)
for i, elast_id in enumerate(elast_ids):
for j in range(5):
axs[i,j].plot(num_firms_array, cs_by_type(elast_id,3)[:,2*j], color="black", lw=lw, alpha=alpha)
axs[i,j].plot(num_firms_array, cs_by_type(elast_id,3)[:,2*j] + 1.96 * cs_by_type_se(elast_id,3)[:,2*j], | |
# repository: schnusch/nimble2nix
#!/usr/bin/env nix-shell
#!nix-shell -i python3 -p git nim nix-prefetch-git python3Packages.packaging
import argparse
import contextlib
from functools import cache
import itertools
import json
import logging
import os
import shlex
import subprocess
import sys
import tempfile
import urllib.parse
from urllib.request import urlopen
from packaging.version import parse as parse_version
def run(cmd, *args, check=True, **kwargs):
    """Thin subprocess.run wrapper that logs the shell-quoted command first."""
    quoted = ' '.join(shlex.quote(part) for part in cmd)
    logging.getLogger('run').debug('$ %s', quoted)
    return subprocess.run(cmd, *args, check=check, **kwargs)
def parse_nimble(nimble):
    """Run `nimble dump --json` on *nimble* and return the parsed metadata."""
    # nimble seems to write warnings to stdout instead of stderr, so we use --silent
    proc = run(['nimble', '--silent', 'dump', '--json', nimble],
               stdout=subprocess.PIPE, encoding='utf-8')
    return json.loads(proc.stdout)
def find_nimble(dir):
    """Return the path of the single *.nimble file inside *dir*."""
    candidates = [entry for entry in os.listdir(dir) if entry.endswith('.nimble')]
    assert len(candidates) == 1
    return os.path.join(dir, candidates[0])
class GitCache(object):
    """Temporary directory that holds git checkouts, reusing one clone per URL."""

    logger = logging.getLogger('GitCache')

    def __init__(self):
        self._temp = None   # TemporaryDirectory handle (context-managed)
        self._root = None   # its filesystem path while the context is active
        self._paths = {}    # url -> checkout path
        self.cloned = 0     # number of clones actually performed
        self.reused = 0     # number of redundant clones avoided

    def __enter__(self):
        self._temp = tempfile.TemporaryDirectory(prefix='nimble2nix.')
        self._root = self._temp.__enter__()
        self.logger.debug("cloning git repos to %s", self._root)
        return self

    def __exit__(self, type, value, traceback):
        self._temp.__exit__(type, value, traceback)
        self._temp = None
        self._root = None
        self.logger.debug("cloned %d repositories, avoided %d redundant clones",
                          self.cloned, self.reused)

    def get_path(self, name, url):
        """Reserve a fresh directory name for *url*, suffixing -2, -3, ... on collision."""
        safe = name.replace(os.sep, '_')
        for counter in itertools.count(1):
            suffix = '' if counter == 1 else '-' + str(counter)
            candidate = os.path.join(self._root, safe + suffix)
            if not os.path.exists(candidate):
                self._paths[url] = candidate
                return candidate

    def clone(self, url, name):
        """Return a checkout of *url*, running `git clone` only on first request."""
        if url in self._paths:
            path = self._paths[url]
            self.logger.debug('reusing %r for %r', path, url)
            self.reused += 1
            return path
        path = self.get_path(name, url)
        run(['git', 'clone', '--', url, path])
        self.cloned += 1
        return path
class Packages(object):
    """Nim package index loaded from packages.json (local file or GitHub)."""

    def __init__(self, name=None):
        """Load the index from *name*, or download it when *name* is None."""
        if name is None:
            logging.info("downloading packages.json...")
            with urlopen('https://github.com/nim-lang/packages/raw/master/packages.json') as resp:
                self.packages = json.loads(resp.read().decode('utf-8'))
        else:
            logging.info("using %s...", name)
            with open(name, 'r', encoding='utf-8') as fp:
                self.packages = json.load(fp)
        self._by_name = {}  # memoized lookups: name -> package dict

    def get(self, name):
        """Return the index entry for *name*, or a bare git-URL fallback.

        Results are memoized in a per-instance dict. The original used
        functools.cache on the method, which keys the cache on *self* and
        keeps every Packages instance alive for the life of the process
        (ruff B019).
        """
        try:
            return self._by_name[name]
        except KeyError:
            pass
        for pkg in self.packages:
            if pkg['name'] == name:
                break
        else:
            # Unknown name: treat it as a plain git URL.
            pkg = {
                'url': name,
                'method': 'git',
            }
        self._by_name[name] = pkg
        return pkg
def check_version_range(version_range, version):
    """Return True when *version* satisfies *version_range*."""
    kind = version_range['kind']
    if kind == 'verAny':
        return True
    if kind == 'verIntersect':
        # Both sides must hold; short-circuits like the plain `and`.
        return (check_version_range(version_range['verILeft'], version)
                and check_version_range(version_range['verIRight'], version))
    try:
        ver = parse_version(version_range['ver'])
        outcomes = {
            'verLater': version > ver,
            'verEqLater': version >= ver,
            'verEarlier': version < ver,
            'verEqEarlier': version <= ver,
            'verEq': version == ver,
        }
        return outcomes[kind]
    except KeyError:
        # Unknown kind (or missing 'ver'): log and propagate.
        logging.error("version range %r not supported", version_range)
        raise
def intersect_version_range(a, b):
    """Combine two version ranges into their intersection.

    TODO apply some logic (simplify instead of just nesting).
    """
    return dict(kind='verIntersect', verILeft=a, verIRight=b)
def format_version_range(version_range):
    """Render *version_range* the way it would appear in a .nimble file."""
    operators = {
        'verLater': '>',
        'verEqLater': '>=',
        'verEarlier': '<',
        'verEqEarlier': '<=',
        'verTilde': '~=',
        'verCaret': '^=',
        'verEq': '',
    }
    kind = version_range['kind']
    if kind == 'verAny':
        return '*'
    if kind == 'verIntersect':
        left = format_version_range(version_range['verILeft'])
        right = format_version_range(version_range['verIRight'])
        return '%s %s' % (left, right)
    if kind == 'verSpecial':
        # Special requirements ('#rev') are kept verbatim.
        return version_range['spe']
    return operators[kind] + version_range['ver']
class Requirement(object):
    """A single nimble requirement plus the machinery to resolve it to a git rev."""

    skip = {'nim'}  # FIXME respect the nim version requirements

    @classmethod
    def from_nimble_file(cls, nimble_file, packages, git_cache):
        """Parse *nimble_file* and return its requirements (minus skipped names)."""
        reqs = []
        for req in parse_nimble(nimble_file)['requires']:
            if req['name'] not in cls.skip:
                reqs.append(cls(req, packages, git_cache))
        return reqs

    def __init__(self, req, packages, git_cache):
        self.name = req['name']       # package name from the nimble file
        self.version = req['ver']     # version-range dict (see check_version_range)
        self._packages = packages     # Packages index for metadata lookup
        self._git_cache = git_cache   # GitCache used to clone repositories

    @property
    def pkg(self):
        """packages.json entry for this requirement (memoized per instance)."""
        # Previously @property + @cache: functools.cache on a method keys on
        # `self` and keeps every instance alive forever (B019). A plain
        # instance attribute gives the same memoization without the leak.
        try:
            return self._pkg
        except AttributeError:
            self._pkg = self._packages.get(self.name)
            return self._pkg

    def find_latest_rev(self):
        """Resolve the requirement to a concrete commit.

        Returns:
            (commit_hash, rev): *commit_hash* is the commit to fetch; *rev*
            is a tag name that must be re-created in the checkout, or None
            when nothing needs to be re-added.

        Raises:
            RuntimeError: when no tag or HEAD satisfies the version range.
        """
        assert self.pkg['method'] == 'git', "%r not supported, currently the only supported method is 'git'" % self.pkg['method']
        git_dir = self._git_cache.clone(self.pkg['url'], self.name)
        rev = None
        add_tag = False
        kind = self.version['kind']
        if kind == 'verSpecial':
            assert self.version['spe'].startswith('#')
            rev = self.version['spe'][1:]
            # TODO what about nim's `head`
            # keep the original rev from the nimble file so we can re-add it
            # to the git repo later
            add_tag = True
        else:
            # get latest tag that satisfies the version range
            tags = run(['git', '-C', git_dir, 'tag', '--list'],
                       stdout=subprocess.PIPE, encoding='utf-8')
            tags = tags.stdout.split()
            for tag in tags:
                parsed_tag = parse_version(tag)
                if check_version_range(self.version, parsed_tag):
                    if rev is None or parsed_tag > parse_version(rev):
                        rev = tag
            if rev is None:
                # see if nimble file in HEAD has a required version
                logging.warning("%s: %s does not provide any tags, so we check if HEAD satisfies the version",
                                self.name, self.pkg['url'])
                info = parse_nimble(find_nimble(git_dir))
                if check_version_range(self.version, parse_version(info['version'])):
                    rev = 'HEAD'
        if rev is None:
            raise RuntimeError("%s: cannot satisfy %r" % (self.name, format_version_range(self.version)))
        # nix-prefetch-git does not work with remote branches and such, so we
        # convert rev to a commit hash
        try:
            commit_hash = run(['git', '-C', git_dir, 'rev-parse', rev],
                              stdout=subprocess.PIPE, encoding='utf-8').stdout.strip()
        except subprocess.CalledProcessError:
            # try again with remote branches
            commit_hash = run(['git', '-C', git_dir, 'rev-parse', 'remotes/origin/' + rev],
                              stdout=subprocess.PIPE, encoding='utf-8').stdout.strip()
        logging.info("%s: %s%s", self.name, commit_hash,
                     ' (%s)' % rev if rev != commit_hash else '')
        # do not add rev from nimble file to the git repo because it is
        # unimportant or an abbreviated commit hash
        if not add_tag or commit_hash.startswith(rev):
            rev = None
        return (commit_hash, rev)

    def prefetch(self):
        """Run nix-prefetch-git for the resolved commit and return its JSON info."""
        commit_hash, rev = self.find_latest_rev()
        # re-add rev from the nimble file to the git repository,
        # nix-prefetch-git removes almost everything and otherwise nimble will
        # not find the commit
        add_tag = '' if rev is None else \
            'git -C "$dir" tag -f %s %s >&2' % (shlex.quote(rev), shlex.quote(commit_hash))
        env = dict(os.environ)
        env['NIX_PREFETCH_GIT_CHECKOUT_HOOK'] = add_tag
        p = run(['nix-prefetch-git',
                 '--fetch-submodules',
                 '--leave-dotGit',
                 '--rev', commit_hash,
                 '--url', self.pkg['url']],
                env=env, stdout=subprocess.PIPE, encoding='utf-8')
        info = json.loads(p.stdout)
        # Remember the hook so to_nimble_nix can reproduce it in the derivation.
        info['NIX_PREFETCH_GIT_CHECKOUT_HOOK'] = add_tag
        return info
def dot_quote(x):
    """Escape *x* and wrap it in double quotes for use in a DOT file."""
    escaped = x.replace('\\', '\\\\').replace('"', '\\"')
    return '"%s"' % escaped
def collect_requirements(nimble, write_dot, url=None, *, collected=None, **kwargs):
    """Recursively collect the requirements of the .nimble file *nimble*.

    Args:
        nimble: path of the .nimble file to parse.
        write_dot: callable receiving DOT-graph edge text (may be a no-op).
        url: URL identifying *nimble* in the graph; defaults to a file:// URL.
        collected: accumulator mapping package URL -> Requirement; created on
            the first (outermost) call.
        **kwargs: forwarded to Requirement.from_nimble_file (expects
            `packages` and `git_cache`).

    Returns:
        The *collected* dict.
    """
    # we will index requirements by their URL, whenever a requirement is
    # encountered store it, if it is already known we update its version range
    # and re-run the process on it
    # TODO thinking about it, this might add sub-requirements that are no longer
    # needed because their parent-dependencies are of another version
    if collected is None:
        collected = {}
    if url is None:
        url = 'file://' + urllib.parse.quote(nimble)
    for req in Requirement.from_nimble_file(nimble, **kwargs):
        # Emit the dependency edge regardless of whether the URL is new.
        write_dot('\t%s -> %s [label=%s];\n' % (dot_quote(url),
                                                dot_quote(req.pkg['url']),
                                                dot_quote(format_version_range(req.version))))
        inserted = collected.setdefault(req.pkg['url'], req)
        if inserted.version != req.version:
            # package URL is already known, update the version range and re-run
            inserted.version = intersect_version_range(inserted.version, req.version)
            logging.info("common requirement %s with %r",
                         req.pkg['url'], format_version_range(inserted.version))
        del req
        # (Re-)prefetch with the possibly-updated version range, then recurse
        # into the prefetched checkout's own .nimble file.
        inserted.prefetched = inserted.prefetch()
        collect_requirements(find_nimble(inserted.prefetched['path']),
                             url=inserted.pkg['url'], write_dot=write_dot,
                             collected=collected, **kwargs)
    return collected
def nix_dump(x):
    """Serialize a Python scalar or list as a Nix expression string."""
    if isinstance(x, (bool, int, float, str)):
        # JSON scalar syntax is also valid Nix syntax.
        return json.dumps(x)
    if isinstance(x, list):
        items = [nix_dump(item) for item in x]
        return ' '.join(['['] + items + [']'])
    raise TypeError('cannot convert %r to a nix value' % x)
def to_nimble_nix(requirements, fp):
    """Write a nix expression producing a nimble packages.json to *fp*.

    Args:
        requirements: mapping of package URL -> Requirement; each Requirement
            must carry a `prefetched` attribute (see Requirement.prefetch).
        fp: writable text file object receiving the nix expression.
    """
    logging.info("creating nimble.nix...")
    fp.write('''\
{ fetchgit, writeText }:
let
  packages = [
''')
    for req in requirements.values():
        # Merge the packages.json entry over a default empty tag list so the
        # 'tags' key is always present.
        pkg = {'tags': []}
        pkg.update(req.pkg)
        pkg['name'] = req.name
        if 'license' not in pkg or 'description' not in pkg:
            # URL-only requirements have no packages.json entry; fall back to
            # the metadata in the package's own .nimble file.
            info = parse_nimble(find_nimble(req.prefetched['path']))
            pkg.setdefault('license', info['license'])
            pkg.setdefault('description', info['desc'])
        fp.write('    {\n')
        for k, v in pkg.items():
            fp.write('      %s = ' % k)
            if k == 'url':
                # Replace the upstream URL by a file:// URL into the nix
                # store, so a sandboxed nimble can fetch the repository.
                fp.write('''"file://" + (fetchgit {
        url = %s;
        rev = %s;
        sha256 = %s;
        fetchSubmodules = true;
        leaveDotGit = true;
      })''' % (nix_dump(req.prefetched['url']),
               nix_dump(req.prefetched['rev']),
               nix_dump(req.prefetched['sha256'])))
                if req.prefetched['NIX_PREFETCH_GIT_CHECKOUT_HOOK']:
                    fp.write('''.overrideAttrs ({ ... }: {
        # re-add rev from the nimble file to the git repository,
        # nix-prefetch-git removes almost everything and otherwise nimble will
        # not find the commit
        NIX_PREFETCH_GIT_CHECKOUT_HOOK = %s;
      })''' % nix_dump(req.prefetched['NIX_PREFETCH_GIT_CHECKOUT_HOOK']))
            else:
                fp.write(nix_dump(v))
            fp.write(';\n')
        fp.write('    }\n')
    fp.write('''  ];
in
writeText "packages.json" (builtins.toJSON packages)
''')
def main(argv=None):
p = argparse.ArgumentParser(description="Collect nimble requirements",
epilog="This tool creates a nix derivation that creates a nimble "
"package.json. The created package.json includes the requirements "
"of the given nimble files recursively with their `url` pointing "
"to the nix store. By creating a symlink from "
"$nimbleDir/packages_global.json to the created package.json "
"nimble can fetch the requirements when sandboxed. Because only "
"one version of a requirement is supported this may not always be "
"able to resolve the dependencies.")
p.add_argument('-o', '--output',
required=True,
help="Nix derivation that creates the package.json")
p.add_argument('-P', '--packages',
help="use custom packages.json instead of downloading")
p.add_argument('--dot',
help="output DOT graph of the requirements")
p.add_argument('-v', '--verbose',
action='store_const',
default=logging.INFO,
const=logging.DEBUG,
help="verbose logging")
p.add_argument('nimble_file', nargs='+')
args = p.parse_args()
logging.basicConfig(format=('\x1b[32m%s\x1b[39m' if sys.stderr.isatty() else '%s')
% '[%(asctime)s] %(levelname)-8s %(name)-8s %(message)s',
stream=sys.stderr, level=args.verbose)
with contextlib.ExitStack() as stack:
packages = Packages(args.packages)
git_cache = stack.enter_context(GitCache())
# write DOT graph
if args.dot is None:
write_dot = lambda x: None
else:
fp = stack.enter_context(open(args.dot, 'w', encoding='utf-8', buffering=1))
fp.write('''digraph {
node [fontname=monospace];
edge [fontname=monospace];
''')
stack.callback(fp.write, '}\n')
write_dot = fp.write
logging.debug("writing dependency graph to %r...", args.dot)
collected = {}
for nimble in args.nimble_file:
collect_requirements(nimble, write_dot, | |
else:
v_sql = v_sql.replace('--#FILTER_PATTERN_REGEX_CASE_INSENSITIVE#', '').replace('#VALUE_PATTERN_REGEX_CASE_INSENSITIVE#', p_textPattern.replace("'", "''"))
else:
if p_caseSentive:
v_sql = v_sql.replace('--#FILTER_PATTERN_CASE_SENSITIVE#', '').replace('#VALUE_PATTERN_CASE_SENSITIVE#', p_textPattern.replace("'", "''"))
else:
v_sql = v_sql.replace('--#FILTER_PATTERN_CASE_INSENSITIVE#', '').replace('#VALUE_PATTERN_CASE_INSENSITIVE#', p_textPattern.replace("'", "''"))
return v_sql
def AdvancedObjectSearchPartitionName(self, p_textPattern, p_caseSentive, p_regex, p_inSchemas):
v_sql = '''
select 'Partition Name'::text as category,
quote_ident(np.nspname)::text as schema_name,
quote_ident(cp.relname)::text as table_name,
''::text as column_name,
quote_ident(cc.relname)::text as match_value
from pg_inherits i
inner join pg_class cp
on cp.oid = i.inhparent
inner join pg_namespace np
on np.oid = cp.relnamespace
inner join pg_class cc
on cc.oid = i.inhrelid
inner join pg_namespace nc
on nc.oid = cc.relnamespace
where cc.relispartition
--#FILTER_PATTERN_CASE_SENSITIVE# and quote_ident(cc.relname) like '#VALUE_PATTERN_CASE_SENSITIVE#'
--#FILTER_PATTERN_CASE_INSENSITIVE# and lower(quote_ident(cc.relname)) like lower('#VALUE_PATTERN_CASE_INSENSITIVE#')
--#FILTER_PATTERN_REGEX_CASE_SENSITIVE# and quote_ident(cc.relname) ~ '#VALUE_PATTERN_REGEX_CASE_SENSITIVE#'
--#FILTER_PATTERN_REGEX_CASE_INSENSITIVE# and quote_ident(cc.relname) ~* '#VALUE_PATTERN_REGEX_CASE_INSENSITIVE#'
--#FILTER_BY_SCHEMA# and lower(quote_ident(np.nspname)) in (#VALUE_BY_SCHEMA#)
'''
if p_inSchemas != '':
v_sql = v_sql.replace('--#FILTER_BY_SCHEMA#', '').replace('#VALUE_BY_SCHEMA#', p_inSchemas)
if p_regex:
if p_caseSentive:
v_sql = v_sql.replace('--#FILTER_PATTERN_REGEX_CASE_SENSITIVE#', '').replace('#VALUE_PATTERN_REGEX_CASE_SENSITIVE#', p_textPattern.replace("'", "''"))
else:
v_sql = v_sql.replace('--#FILTER_PATTERN_REGEX_CASE_INSENSITIVE#', '').replace('#VALUE_PATTERN_REGEX_CASE_INSENSITIVE#', p_textPattern.replace("'", "''"))
else:
if p_caseSentive:
v_sql = v_sql.replace('--#FILTER_PATTERN_CASE_SENSITIVE#', '').replace('#VALUE_PATTERN_CASE_SENSITIVE#', p_textPattern.replace("'", "''"))
else:
v_sql = v_sql.replace('--#FILTER_PATTERN_CASE_INSENSITIVE#', '').replace('#VALUE_PATTERN_CASE_INSENSITIVE#', p_textPattern.replace("'", "''"))
return v_sql
def AdvancedObjectSearchRoleName(self, p_textPattern, p_caseSentive, p_regex):
v_sql = '''
select 'Role Name'::text as category,
''::text as schema_name,
''::text as table_name,
''::text as column_name,
quote_ident(rolname)::text as match_value
from pg_roles
where 1 = 1
--#FILTER_PATTERN_CASE_SENSITIVE# and quote_ident(rolname) like '#VALUE_PATTERN_CASE_SENSITIVE#'
--#FILTER_PATTERN_CASE_INSENSITIVE# and lower(quote_ident(rolname)) like lower('#VALUE_PATTERN_CASE_INSENSITIVE#')
--#FILTER_PATTERN_REGEX_CASE_SENSITIVE# and quote_ident(rolname) ~ '#VALUE_PATTERN_REGEX_CASE_SENSITIVE#'
--#FILTER_PATTERN_REGEX_CASE_INSENSITIVE# and quote_ident(rolname) ~* '#VALUE_PATTERN_REGEX_CASE_INSENSITIVE#'
'''
if p_regex:
if p_caseSentive:
v_sql = v_sql.replace('--#FILTER_PATTERN_REGEX_CASE_SENSITIVE#', '').replace('#VALUE_PATTERN_REGEX_CASE_SENSITIVE#', p_textPattern.replace("'", "''"))
else:
v_sql = v_sql.replace('--#FILTER_PATTERN_REGEX_CASE_INSENSITIVE#', '').replace('#VALUE_PATTERN_REGEX_CASE_INSENSITIVE#', p_textPattern.replace("'", "''"))
else:
if p_caseSentive:
v_sql = v_sql.replace('--#FILTER_PATTERN_CASE_SENSITIVE#', '').replace('#VALUE_PATTERN_CASE_SENSITIVE#', p_textPattern.replace("'", "''"))
else:
v_sql = v_sql.replace('--#FILTER_PATTERN_CASE_INSENSITIVE#', '').replace('#VALUE_PATTERN_CASE_INSENSITIVE#', p_textPattern.replace("'", "''"))
return v_sql
def AdvancedObjectSearchTablespaceName(self, p_textPattern, p_caseSentive, p_regex):
v_sql = '''
select 'Tablespace Name'::text as category,
''::text as schema_name,
''::text as table_name,
''::text as column_name,
quote_ident(spcname)::text as match_value
from pg_tablespace
where 1 = 1
--#FILTER_PATTERN_CASE_SENSITIVE# and quote_ident(spcname) like '#VALUE_PATTERN_CASE_SENSITIVE#'
--#FILTER_PATTERN_CASE_INSENSITIVE# and lower(quote_ident(spcname)) like lower('#VALUE_PATTERN_CASE_INSENSITIVE#')
--#FILTER_PATTERN_REGEX_CASE_SENSITIVE# and quote_ident(spcname) ~ '#VALUE_PATTERN_REGEX_CASE_SENSITIVE#'
--#FILTER_PATTERN_REGEX_CASE_INSENSITIVE# and quote_ident(spcname) ~* '#VALUE_PATTERN_REGEX_CASE_INSENSITIVE#'
'''
if p_regex:
if p_caseSentive:
v_sql = v_sql.replace('--#FILTER_PATTERN_REGEX_CASE_SENSITIVE#', '').replace('#VALUE_PATTERN_REGEX_CASE_SENSITIVE#', p_textPattern.replace("'", "''"))
else:
v_sql = v_sql.replace('--#FILTER_PATTERN_REGEX_CASE_INSENSITIVE#', '').replace('#VALUE_PATTERN_REGEX_CASE_INSENSITIVE#', p_textPattern.replace("'", "''"))
else:
if p_caseSentive:
v_sql = v_sql.replace('--#FILTER_PATTERN_CASE_SENSITIVE#', '').replace('#VALUE_PATTERN_CASE_SENSITIVE#', p_textPattern.replace("'", "''"))
else:
v_sql = v_sql.replace('--#FILTER_PATTERN_CASE_INSENSITIVE#', '').replace('#VALUE_PATTERN_CASE_INSENSITIVE#', p_textPattern.replace("'", "''"))
return v_sql
def AdvancedObjectSearchExtensionName(self, p_textPattern, p_caseSentive, p_regex):
v_sql = '''
select 'Extension Name'::text as category,
''::text as schema_name,
''::text as table_name,
''::text as column_name,
quote_ident(extname)::text as match_value
from pg_extension
where 1 = 1
--#FILTER_PATTERN_CASE_SENSITIVE# and quote_ident(extname) like '#VALUE_PATTERN_CASE_SENSITIVE#'
--#FILTER_PATTERN_CASE_INSENSITIVE# and lower(quote_ident(extname)) like lower('#VALUE_PATTERN_CASE_INSENSITIVE#')
--#FILTER_PATTERN_REGEX_CASE_SENSITIVE# and quote_ident(extname) ~ '#VALUE_PATTERN_REGEX_CASE_SENSITIVE#'
--#FILTER_PATTERN_REGEX_CASE_INSENSITIVE# and quote_ident(extname) ~* '#VALUE_PATTERN_REGEX_CASE_INSENSITIVE#'
'''
if p_regex:
if p_caseSentive:
v_sql = v_sql.replace('--#FILTER_PATTERN_REGEX_CASE_SENSITIVE#', '').replace('#VALUE_PATTERN_REGEX_CASE_SENSITIVE#', p_textPattern.replace("'", "''"))
else:
v_sql = v_sql.replace('--#FILTER_PATTERN_REGEX_CASE_INSENSITIVE#', '').replace('#VALUE_PATTERN_REGEX_CASE_INSENSITIVE#', p_textPattern.replace("'", "''"))
else:
if p_caseSentive:
v_sql = v_sql.replace('--#FILTER_PATTERN_CASE_SENSITIVE#', '').replace('#VALUE_PATTERN_CASE_SENSITIVE#', p_textPattern.replace("'", "''"))
else:
v_sql = v_sql.replace('--#FILTER_PATTERN_CASE_INSENSITIVE#', '').replace('#VALUE_PATTERN_CASE_INSENSITIVE#', p_textPattern.replace("'", "''"))
return v_sql
def AdvancedObjectSearchFKColumnName(self, p_textPattern, p_caseSentive, p_regex, p_inSchemas):
v_sql = '''
with select_fks as (
select distinct
quote_ident(kcu1.constraint_schema) as table_schema,
quote_ident(kcu1.table_name) as table_name,
quote_ident(kcu1.column_name) as column_name,
quote_ident(kcu2.constraint_schema) as r_table_schema,
quote_ident(kcu2.table_name) as r_table_name,
quote_ident(kcu2.column_name) as r_column_name
from information_schema.referential_constraints rc
inner join information_schema.key_column_usage kcu1
on kcu1.constraint_catalog = rc.constraint_catalog
and kcu1.constraint_schema = rc.constraint_schema
and kcu1.constraint_name = rc.constraint_name
inner join information_schema.key_column_usage kcu2
on kcu2.constraint_catalog = rc.unique_constraint_catalog
and kcu2.constraint_schema = rc.unique_constraint_schema
and kcu2.constraint_name = rc.unique_constraint_name
and kcu2.ordinal_position = kcu1.ordinal_position
where 1 = 1
--#FILTER_BY_SCHEMA# and lower(quote_ident(kcu1.constraint_schema)) in (#VALUE_BY_SCHEMA#) or lower(quote_ident(kcu2.constraint_schema)) in (#VALUE_BY_SCHEMA#)
)
select 'FK Column Name'::text as category,
sf.table_schema::text as schema_name,
sf.table_name::text as table_name,
''::text as column_name,
sf.column_name::text as match_value
from select_fks sf
where sf.table_schema not in ('information_schema', 'omnidb', 'pg_catalog', 'pg_toast')
and sf.table_schema not like 'pg%%temp%%'
--#FILTER_PATTERN_CASE_SENSITIVE# and sf.column_name like '#VALUE_PATTERN_CASE_SENSITIVE#'
--#FILTER_PATTERN_CASE_INSENSITIVE# and lower(sf.column_name) like lower('#VALUE_PATTERN_CASE_INSENSITIVE#')
--#FILTER_PATTERN_REGEX_CASE_SENSITIVE# and sf.column_name ~ '#VALUE_PATTERN_REGEX_CASE_SENSITIVE#'
--#FILTER_PATTERN_REGEX_CASE_INSENSITIVE# and sf.column_name ~* '#VALUE_PATTERN_REGEX_CASE_INSENSITIVE#'
--#FILTER_BY_SCHEMA# and lower(sf.table_schema) in (#VALUE_BY_SCHEMA#)
union
select 'FK Column Name'::text as category,
(sf.r_table_schema || ' (referenced)')::text as schema_name,
(sf.r_table_name || ' (referenced)')::text as table_name,
''::text as column_name,
(sf.r_column_name || ' (referenced)')::text as match_value
from select_fks sf
where sf.r_table_schema not in ('information_schema', 'omnidb', 'pg_catalog', 'pg_toast')
and sf.r_table_schema not like 'pg%%temp%%'
--#FILTER_PATTERN_CASE_SENSITIVE# and sf.r_column_name like '#VALUE_PATTERN_CASE_SENSITIVE#'
--#FILTER_PATTERN_CASE_INSENSITIVE# and lower(sf.r_column_name) like lower('#VALUE_PATTERN_CASE_INSENSITIVE#')
--#FILTER_PATTERN_REGEX_CASE_SENSITIVE# and sf.r_column_name ~ '#VALUE_PATTERN_REGEX_CASE_SENSITIVE#'
--#FILTER_PATTERN_REGEX_CASE_INSENSITIVE# and sf.r_column_name ~* '#VALUE_PATTERN_REGEX_CASE_INSENSITIVE#'
--#FILTER_BY_SCHEMA# and lower(sf.r_table_schema) in (#VALUE_BY_SCHEMA#)
'''
if p_inSchemas != '':
v_sql = v_sql.replace('--#FILTER_BY_SCHEMA#', '').replace('#VALUE_BY_SCHEMA#', p_inSchemas)
if p_regex:
if p_caseSentive:
v_sql = v_sql.replace('--#FILTER_PATTERN_REGEX_CASE_SENSITIVE#', '').replace('#VALUE_PATTERN_REGEX_CASE_SENSITIVE#', p_textPattern.replace("'", "''"))
else:
v_sql = v_sql.replace('--#FILTER_PATTERN_REGEX_CASE_INSENSITIVE#', '').replace('#VALUE_PATTERN_REGEX_CASE_INSENSITIVE#', p_textPattern.replace("'", "''"))
else:
if p_caseSentive:
v_sql = v_sql.replace('--#FILTER_PATTERN_CASE_SENSITIVE#', '').replace('#VALUE_PATTERN_CASE_SENSITIVE#', p_textPattern.replace("'", "''"))
else:
v_sql = v_sql.replace('--#FILTER_PATTERN_CASE_INSENSITIVE#', '').replace('#VALUE_PATTERN_CASE_INSENSITIVE#', p_textPattern.replace("'", "''"))
return v_sql
def AdvancedObjectSearchPKColumnName(self, p_textPattern, p_caseSentive, p_regex, p_inSchemas):
v_sql = '''
select 'PK Column Name'::text as category,
quote_ident(tc.table_schema)::text as schema_name,
quote_ident(tc.table_name)::text as table_name,
''::text as column_name,
quote_ident(kc.column_name) as match_value
from information_schema.table_constraints tc
inner join information_schema.key_column_usage kc
on kc.table_name = tc.table_name
and kc.table_schema = tc.table_schema
and kc.constraint_name = tc.constraint_name
where tc.constraint_type = 'PRIMARY KEY'
and quote_ident(tc.table_schema) not in ('information_schema', 'omnidb', 'pg_catalog', 'pg_toast')
and quote_ident(tc.table_schema) not like 'pg%%temp%%'
--#FILTER_PATTERN_CASE_SENSITIVE# and quote_ident(kc.column_name) like '#VALUE_PATTERN_CASE_SENSITIVE#'
--#FILTER_PATTERN_CASE_INSENSITIVE# and lower(quote_ident(kc.column_name)) like lower('#VALUE_PATTERN_CASE_INSENSITIVE#')
--#FILTER_PATTERN_REGEX_CASE_SENSITIVE# and quote_ident(kc.column_name) ~ '#VALUE_PATTERN_REGEX_CASE_SENSITIVE#'
--#FILTER_PATTERN_REGEX_CASE_INSENSITIVE# and quote_ident(kc.column_name) ~* '#VALUE_PATTERN_REGEX_CASE_INSENSITIVE#'
--#FILTER_BY_SCHEMA# and lower(quote_ident(tc.table_schema)) in (#VALUE_BY_SCHEMA#)
'''
if p_inSchemas != '':
v_sql = v_sql.replace('--#FILTER_BY_SCHEMA#', '').replace('#VALUE_BY_SCHEMA#', p_inSchemas)
if p_regex:
if p_caseSentive:
v_sql = v_sql.replace('--#FILTER_PATTERN_REGEX_CASE_SENSITIVE#', '').replace('#VALUE_PATTERN_REGEX_CASE_SENSITIVE#', p_textPattern.replace("'", "''"))
else:
v_sql = v_sql.replace('--#FILTER_PATTERN_REGEX_CASE_INSENSITIVE#', '').replace('#VALUE_PATTERN_REGEX_CASE_INSENSITIVE#', p_textPattern.replace("'", "''"))
else:
if p_caseSentive:
v_sql = v_sql.replace('--#FILTER_PATTERN_CASE_SENSITIVE#', '').replace('#VALUE_PATTERN_CASE_SENSITIVE#', p_textPattern.replace("'", "''"))
else:
v_sql = v_sql.replace('--#FILTER_PATTERN_CASE_INSENSITIVE#', '').replace('#VALUE_PATTERN_CASE_INSENSITIVE#', p_textPattern.replace("'", "''"))
return v_sql
def AdvancedObjectSearchUniqueColumnName(self, p_textPattern, p_caseSentive, p_regex, p_inSchemas):
v_sql = '''
select 'Unique Column Name'::text as category,
quote_ident(tc.table_schema)::text as schema_name,
quote_ident(tc.table_name)::text as table_name,
''::text as column_name,
quote_ident(kc.column_name) as match_value
from information_schema.table_constraints tc
inner join information_schema.key_column_usage kc
on kc.table_name = tc.table_name
and kc.table_schema = tc.table_schema
and kc.constraint_name = tc.constraint_name
where tc.constraint_type = 'UNIQUE'
and quote_ident(tc.table_schema) not in ('information_schema', 'omnidb', 'pg_catalog', 'pg_toast')
and quote_ident(tc.table_schema) not like 'pg%%temp%%'
--#FILTER_PATTERN_CASE_SENSITIVE# and quote_ident(kc.column_name) like '#VALUE_PATTERN_CASE_SENSITIVE#'
--#FILTER_PATTERN_CASE_INSENSITIVE# and lower(quote_ident(kc.column_name)) like lower('#VALUE_PATTERN_CASE_INSENSITIVE#')
--#FILTER_PATTERN_REGEX_CASE_SENSITIVE# and quote_ident(kc.column_name) ~ '#VALUE_PATTERN_REGEX_CASE_SENSITIVE#'
--#FILTER_PATTERN_REGEX_CASE_INSENSITIVE# and quote_ident(kc.column_name) ~* '#VALUE_PATTERN_REGEX_CASE_INSENSITIVE#'
--#FILTER_BY_SCHEMA# and lower(quote_ident(tc.table_schema)) in (#VALUE_BY_SCHEMA#)
'''
if p_inSchemas != '':
v_sql = v_sql.replace('--#FILTER_BY_SCHEMA#', '').replace('#VALUE_BY_SCHEMA#', p_inSchemas)
if p_regex:
if p_caseSentive:
v_sql = v_sql.replace('--#FILTER_PATTERN_REGEX_CASE_SENSITIVE#', '').replace('#VALUE_PATTERN_REGEX_CASE_SENSITIVE#', p_textPattern.replace("'", "''"))
else:
v_sql = v_sql.replace('--#FILTER_PATTERN_REGEX_CASE_INSENSITIVE#', '').replace('#VALUE_PATTERN_REGEX_CASE_INSENSITIVE#', p_textPattern.replace("'", "''"))
else:
if p_caseSentive:
v_sql = v_sql.replace('--#FILTER_PATTERN_CASE_SENSITIVE#', '').replace('#VALUE_PATTERN_CASE_SENSITIVE#', p_textPattern.replace("'", "''"))
else:
v_sql = v_sql.replace('--#FILTER_PATTERN_CASE_INSENSITIVE#', '').replace('#VALUE_PATTERN_CASE_INSENSITIVE#', p_textPattern.replace("'", "''"))
return v_sql
def AdvancedObjectSearchIndexColumnName(self, p_textPattern, p_caseSentive, p_regex, p_inSchemas):
v_sql = '''
select *
from (
select 'Index Column Name'::text as category,
quote_ident(t.schemaname)::text as schema_name,
quote_ident(t.tablename)::text as table_name,
''::text as column_name,
unnest(string_to_array(replace(substr(t.indexdef, strpos(t.indexdef, '(')+1, strpos(t.indexdef, ')')-strpos(t.indexdef, '(')-1), ' ', ''),',')) as match_value
from pg_indexes t
) t
where quote_ident(t.schemaname) not in ('information_schema', 'omnidb', 'pg_catalog', 'pg_toast')
and quote_ident(t.schemaname) not like 'pg%%temp%%'
--#FILTER_PATTERN_CASE_SENSITIVE# and quote_ident(t.match_value) like '#VALUE_PATTERN_CASE_SENSITIVE#'
--#FILTER_PATTERN_CASE_INSENSITIVE# and lower(quote_ident(t.match_value)) like lower('#VALUE_PATTERN_CASE_INSENSITIVE#')
--#FILTER_PATTERN_REGEX_CASE_SENSITIVE# and quote_ident(t.match_value) ~ '#VALUE_PATTERN_REGEX_CASE_SENSITIVE#'
--#FILTER_PATTERN_REGEX_CASE_INSENSITIVE# and quote_ident(t.match_value) ~* '#VALUE_PATTERN_REGEX_CASE_INSENSITIVE#'
--#FILTER_BY_SCHEMA# and lower(quote_ident(t.schema_name)) in (#VALUE_BY_SCHEMA#)
'''
if p_inSchemas != '':
v_sql = v_sql.replace('--#FILTER_BY_SCHEMA#', '').replace('#VALUE_BY_SCHEMA#', p_inSchemas)
if p_regex:
if p_caseSentive:
v_sql = v_sql.replace('--#FILTER_PATTERN_REGEX_CASE_SENSITIVE#', '').replace('#VALUE_PATTERN_REGEX_CASE_SENSITIVE#', p_textPattern.replace("'", "''"))
else:
v_sql = v_sql.replace('--#FILTER_PATTERN_REGEX_CASE_INSENSITIVE#', '').replace('#VALUE_PATTERN_REGEX_CASE_INSENSITIVE#', p_textPattern.replace("'", "''"))
else:
if p_caseSentive:
v_sql = v_sql.replace('--#FILTER_PATTERN_CASE_SENSITIVE#', '').replace('#VALUE_PATTERN_CASE_SENSITIVE#', p_textPattern.replace("'", "''"))
else:
v_sql = v_sql.replace('--#FILTER_PATTERN_CASE_INSENSITIVE#', '').replace('#VALUE_PATTERN_CASE_INSENSITIVE#', p_textPattern.replace("'", "''"))
return v_sql
def AdvancedObjectSearchCheckDefinition(self, p_textPattern, p_caseSentive, p_regex, p_inSchemas):
v_sql = '''
select 'Check Definition'::text as category,
quote_ident(n.nspname)::text as schema_name,
quote_ident(t.relname)::text as table_name,
''::text as column_name,
pg_get_constraintdef(c.oid) as match_value
from pg_constraint c
inner join pg_class t
on t.oid = c.conrelid
inner join pg_namespace n
on t.relnamespace = n.oid
where contype = 'c'
and quote_ident(n.nspname) not in ('information_schema', 'omnidb', 'pg_catalog', 'pg_toast')
and quote_ident(n.nspname) not like 'pg%%temp%%'
--#FILTER_PATTERN_CASE_SENSITIVE# and pg_get_constraintdef(c.oid) like '#VALUE_PATTERN_CASE_SENSITIVE#'
--#FILTER_PATTERN_CASE_INSENSITIVE# and lower(pg_get_constraintdef(c.oid)) like lower('#VALUE_PATTERN_CASE_INSENSITIVE#')
--#FILTER_PATTERN_REGEX_CASE_SENSITIVE# and pg_get_constraintdef(c.oid) ~ '#VALUE_PATTERN_REGEX_CASE_SENSITIVE#'
--#FILTER_PATTERN_REGEX_CASE_INSENSITIVE# and pg_get_constraintdef(c.oid) ~* '#VALUE_PATTERN_REGEX_CASE_INSENSITIVE#'
--#FILTER_BY_SCHEMA# and lower(quote_ident(n.nspname)) in (#VALUE_BY_SCHEMA#)
'''
if p_inSchemas != '':
v_sql = v_sql.replace('--#FILTER_BY_SCHEMA#', '').replace('#VALUE_BY_SCHEMA#', p_inSchemas)
if p_regex:
if p_caseSentive:
v_sql = v_sql.replace('--#FILTER_PATTERN_REGEX_CASE_SENSITIVE#', '').replace('#VALUE_PATTERN_REGEX_CASE_SENSITIVE#', p_textPattern.replace("'", "''"))
else:
v_sql = v_sql.replace('--#FILTER_PATTERN_REGEX_CASE_INSENSITIVE#', '').replace('#VALUE_PATTERN_REGEX_CASE_INSENSITIVE#', p_textPattern.replace("'", "''"))
else:
if p_caseSentive:
v_sql = v_sql.replace('--#FILTER_PATTERN_CASE_SENSITIVE#', '').replace('#VALUE_PATTERN_CASE_SENSITIVE#', p_textPattern.replace("'", "''"))
else:
v_sql = v_sql.replace('--#FILTER_PATTERN_CASE_INSENSITIVE#', '').replace('#VALUE_PATTERN_CASE_INSENSITIVE#', p_textPattern.replace("'", "''"))
return v_sql
def AdvancedObjectSearchTableTriggerName(self, p_textPattern, p_caseSentive, p_regex, p_inSchemas):
v_sql = '''
select 'Table Trigger Name'::text as category,
quote_ident(n.nspname)::text as schema_name,
quote_ident(c.relname)::text as table_name,
''::text as column_name,
quote_ident(t.tgname) as match_value
from pg_trigger t
inner join pg_class c
on c.oid = t.tgrelid
inner join pg_namespace n
on n.oid = c.relnamespace
inner join pg_proc p
on p.oid = t.tgfoid
inner join pg_namespace np
on np.oid = p.pronamespace
where not t.tgisinternal
and quote_ident(n.nspname) not in ('information_schema', 'omnidb', 'pg_catalog', 'pg_toast')
and quote_ident(n.nspname) not | |
assert isinstance(event, Event)
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime)
assert event.event.name == 'EVT_ABORTED'
scp.shutdown()
def test_abort_bind(self):
"""Test binding a handler to EVT_ABORTED."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context(VerificationSOPClass)
ae.add_requested_context(VerificationSOPClass)
handlers = [(evt.EVT_ABORTED, handle)]
scp = ae.start_server(('', 11112), block=False)
assoc = ae.associate('localhost', 11112)
assert assoc.is_established
assert len(scp.active_associations) == 1
assert assoc.get_handlers(evt.EVT_ABORTED) == []
assert assoc.get_handlers(evt.EVT_ACCEPTED) == []
assert assoc.get_handlers(evt.EVT_ESTABLISHED) == []
assert assoc.get_handlers(evt.EVT_REJECTED) == []
assert assoc.get_handlers(evt.EVT_RELEASED) == []
assert assoc.get_handlers(evt.EVT_REQUESTED) == []
assoc.bind(evt.EVT_ABORTED, handle)
assert assoc.get_handlers(evt.EVT_ABORTED) == [(handle, None)]
assert assoc.get_handlers(evt.EVT_ACCEPTED) == []
assert assoc.get_handlers(evt.EVT_ESTABLISHED) == []
assert assoc.get_handlers(evt.EVT_REJECTED) == []
assert assoc.get_handlers(evt.EVT_RELEASED) == []
assert assoc.get_handlers(evt.EVT_REQUESTED) == []
assoc.abort()
while scp.active_associations:
time.sleep(0.05)
assert len(triggered) == 1
event = triggered[0]
assert isinstance(event, Event)
assert isinstance(event.assoc, Association)
assert isinstance(event.timestamp, datetime)
assert event.event.name == 'EVT_ABORTED'
scp.shutdown()
def test_abort_unbind(self):
"""Test starting with handler bound to EVT_ABORTED."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.add_supported_context(VerificationSOPClass)
ae.add_requested_context(VerificationSOPClass)
handlers = [(evt.EVT_ABORTED, handle)]
scp = ae.start_server(('', 11112), block=False)
assoc = ae.associate('localhost', 11112, evt_handlers=handlers)
assert assoc.is_established
assert len(scp.active_associations) == 1
assert assoc.get_handlers(evt.EVT_ABORTED) == [(handle, None)]
assert assoc.get_handlers(evt.EVT_ACCEPTED) == []
assert assoc.get_handlers(evt.EVT_ESTABLISHED) == []
assert assoc.get_handlers(evt.EVT_REJECTED) == []
assert assoc.get_handlers(evt.EVT_RELEASED) == []
assert assoc.get_handlers(evt.EVT_REQUESTED) == []
assoc.unbind(evt.EVT_ABORTED, handle)
assert assoc.get_handlers(evt.EVT_ABORTED) == []
assert assoc.get_handlers(evt.EVT_ACCEPTED) == []
assert assoc.get_handlers(evt.EVT_ESTABLISHED) == []
assert assoc.get_handlers(evt.EVT_REJECTED) == []
assert assoc.get_handlers(evt.EVT_RELEASED) == []
assert assoc.get_handlers(evt.EVT_REQUESTED) == []
assoc.abort()
while scp.active_associations:
time.sleep(0.05)
assert len(triggered) == 0
scp.shutdown()
    def test_abort_remote(self):
        """Test the handler bound to EVT_ABORTED when the peer requests the abort."""
        triggered = []
        def handle(event):
            triggered.append(event)
        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        handlers = [(evt.EVT_ABORTED, handle)]
        scp = ae.start_server(('', 11112), block=False)
        assoc = ae.associate('localhost', 11112, evt_handlers=handlers)
        assert assoc.is_established
        assert len(scp.active_associations) == 1
        # Abort from the acceptor (remote) side; the requestor's handler
        # must still be triggered.
        scp.active_associations[0].abort()
        while scp.active_associations:
            time.sleep(0.05)
        assert len(triggered) == 1
        event = triggered[0]
        assert isinstance(event, Event)
        assert isinstance(event.assoc, Association)
        assert isinstance(event.timestamp, datetime)
        assert event.event.name == 'EVT_ABORTED'
        scp.shutdown()
    def test_abort_raises(self, caplog):
        """Test the handler for EVT_ABORTED raising an exception is logged."""
        def handle(event):
            raise NotImplementedError("Exception description")
        self.ae = ae = AE()
        ae.add_supported_context(VerificationSOPClass)
        ae.add_requested_context(VerificationSOPClass)
        handlers = [(evt.EVT_ABORTED, handle)]
        scp = ae.start_server(('', 11112), block=False)
        with caplog.at_level(logging.ERROR, logger='pynetdicom'):
            assoc = ae.associate('localhost', 11112, evt_handlers=handlers)
            assert assoc.is_established
            assoc.abort()
            while scp.active_associations:
                time.sleep(0.05)
            scp.shutdown()
            # The handler's exception must be caught and logged, not raised;
            # the log message names the failing handler function.
            msg = (
                "Exception raised in user's 'evt.EVT_ABORTED' event handler"
                " 'handle'"
            )
            assert msg in caplog.text
            assert "Exception description" in caplog.text
def test_accept(self):
    """Test starting with handler bound to EVT_ACCEPTED."""
    captured = []

    def handle(event):
        captured.append(event)

    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    scp = ae.start_server(('', 11112), block=False)
    assoc = ae.associate(
        'localhost', 11112, evt_handlers=[(evt.EVT_ACCEPTED, handle)]
    )
    assert assoc.is_established
    assert len(scp.active_associations) == 1
    # Only EVT_ACCEPTED should have a bound handler.
    assert assoc.get_handlers(evt.EVT_ACCEPTED) == [(handle, None)]
    for unbound in (evt.EVT_ABORTED, evt.EVT_ESTABLISHED, evt.EVT_REJECTED,
                    evt.EVT_RELEASED, evt.EVT_REQUESTED):
        assert assoc.get_handlers(unbound) == []
    assoc.abort()
    while scp.active_associations:
        time.sleep(0.05)
    # Exactly one fully-populated event should have been captured.
    assert len(captured) == 1
    event = captured[0]
    assert isinstance(event, Event)
    assert isinstance(event.assoc, Association)
    assert isinstance(event.timestamp, datetime)
    assert event.event.name == 'EVT_ACCEPTED'
    scp.shutdown()
def test_accept_raises(self, caplog):
    """Test the handler for EVT_ACCEPTED raising exception.

    The exception must be caught and logged by pynetdicom, not propagated.
    """
    def handle(event):
        raise NotImplementedError("Exception description")
    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    # BUG FIX: the handler was bound to evt.EVT_ABORTED, but this test (and
    # the log message asserted below) exercises EVT_ACCEPTED, so the caplog
    # assertion could never pass.
    handlers = [(evt.EVT_ACCEPTED, handle)]
    scp = ae.start_server(('', 11112), block=False)
    with caplog.at_level(logging.ERROR, logger='pynetdicom'):
        assoc = ae.associate('localhost', 11112, evt_handlers=handlers)
        assert assoc.is_established
        assoc.abort()
        while scp.active_associations:
            time.sleep(0.05)
        scp.shutdown()
        msg = (
            "Exception raised in user's 'evt.EVT_ACCEPTED' event handler"
            " 'handle'"
        )
        assert msg in caplog.text
        assert "Exception description" in caplog.text
def test_release(self):
    """Test starting with handler bound to EVT_RELEASED."""
    captured = []

    def handle(event):
        captured.append(event)

    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    scp = ae.start_server(('', 11112), block=False)
    assoc = ae.associate(
        'localhost', 11112, evt_handlers=[(evt.EVT_RELEASED, handle)]
    )
    assert assoc.is_established
    assert len(scp.active_associations) == 1
    # Only EVT_RELEASED should have a bound handler.
    assert assoc.get_handlers(evt.EVT_RELEASED) == [(handle, None)]
    for unbound in (evt.EVT_ABORTED, evt.EVT_ACCEPTED, evt.EVT_ESTABLISHED,
                    evt.EVT_REJECTED, evt.EVT_REQUESTED):
        assert assoc.get_handlers(unbound) == []
    assoc.release()
    while scp.active_associations:
        time.sleep(0.05)
    # Exactly one fully-populated event should have been captured.
    assert len(captured) == 1
    event = captured[0]
    assert isinstance(event, Event)
    assert isinstance(event.assoc, Association)
    assert isinstance(event.timestamp, datetime)
    assert event.event.name == 'EVT_RELEASED'
    scp.shutdown()
def test_release_bind(self):
    """Test binding a handler to EVT_RELEASED after association."""
    triggered = []
    def handle(event):
        triggered.append(event)
    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    # No evt_handlers are passed to associate(); the handler is bound
    # afterwards via assoc.bind().  (Removed an unused `handlers` local
    # that was never passed anywhere.)
    scp = ae.start_server(('', 11112), block=False)
    assoc = ae.associate('localhost', 11112)
    assert assoc.is_established
    assert len(scp.active_associations) == 1
    assert assoc.get_handlers(evt.EVT_RELEASED) == []
    assoc.bind(evt.EVT_RELEASED, handle)
    assert assoc.get_handlers(evt.EVT_RELEASED) == [(handle, None)]
    assoc.release()
    while scp.active_associations:
        time.sleep(0.05)
    # The bound handler fired exactly once with a populated Event.
    assert len(triggered) == 1
    event = triggered[0]
    assert isinstance(event, Event)
    assert isinstance(event.assoc, Association)
    assert isinstance(event.timestamp, datetime)
    assert event.event.name == 'EVT_RELEASED'
    scp.shutdown()
def test_release_unbind(self):
    """Test unbinding a handler that was bound to EVT_RELEASED."""
    triggered = []
    def handle(event):
        triggered.append(event)
    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    handlers = [(evt.EVT_RELEASED, handle)]
    scp = ae.start_server(('', 11112), block=False)
    assoc = ae.associate('localhost', 11112, evt_handlers=handlers)
    assert assoc.is_established
    assert len(scp.active_associations) == 1
    assert assoc.get_handlers(evt.EVT_RELEASED) == [(handle, None)]
    # Unbind before releasing; the handler must no longer fire.
    assoc.unbind(evt.EVT_RELEASED, handle)
    assert assoc.get_handlers(evt.EVT_RELEASED) == []
    assoc.release()
    while scp.active_associations:
        time.sleep(0.05)
    # No events captured: the handler was unbound before the release.
    assert len(triggered) == 0
    scp.shutdown()
def test_release_remote(self):
    """Test the EVT_RELEASED handler fires for a remotely requested release.

    The release is initiated by the acceptor (``scp``) side, not by the
    requestor that bound the handler.
    """
    triggered = []
    def handle(event):
        triggered.append(event)
    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    handlers = [(evt.EVT_RELEASED, handle)]
    scp = ae.start_server(('', 11112), block=False)
    assoc = ae.associate('localhost', 11112, evt_handlers=handlers)
    assert assoc.is_established
    assert len(scp.active_associations) == 1
    # Release from the acceptor's side of the association.
    scp.active_associations[0].release()
    while scp.active_associations:
        time.sleep(0.05)
    # The handler fired exactly once with a fully populated Event.
    assert len(triggered) == 1
    event = triggered[0]
    assert isinstance(event, Event)
    assert isinstance(event.assoc, Association)
    assert isinstance(event.timestamp, datetime)
    assert event.event.name == 'EVT_RELEASED'
    scp.shutdown()
def test_release_raises(self, caplog):
    """Test the handler for EVT_RELEASED raising an exception.

    The exception must be caught and logged by pynetdicom, not propagated.
    """
    def handle(event):
        raise NotImplementedError("Exception description")
    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    handlers = [(evt.EVT_RELEASED, handle)]
    scp = ae.start_server(('', 11112), block=False)
    with caplog.at_level(logging.ERROR, logger='pynetdicom'):
        assoc = ae.associate('localhost', 11112, evt_handlers=handlers)
        assert assoc.is_established
        # Releasing triggers EVT_RELEASED, which runs the raising handler.
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)
        scp.shutdown()
        msg = (
            "Exception raised in user's 'evt.EVT_RELEASED' event handler"
            " 'handle'"
        )
        assert msg in caplog.text
        assert "Exception description" in caplog.text
def test_established(self):
    """Test starting with handler bound to EVT_ESTABLISHED."""
    captured = []

    def handle(event):
        captured.append(event)

    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    scp = ae.start_server(('', 11112), block=False)
    assoc = ae.associate(
        'localhost', 11112, evt_handlers=[(evt.EVT_ESTABLISHED, handle)]
    )
    assert assoc.is_established
    assert len(scp.active_associations) == 1
    # Only EVT_ESTABLISHED should have a bound handler.
    assert assoc.get_handlers(evt.EVT_ESTABLISHED) == [(handle, None)]
    for unbound in (evt.EVT_ABORTED, evt.EVT_ACCEPTED, evt.EVT_REJECTED,
                    evt.EVT_RELEASED, evt.EVT_REQUESTED):
        assert assoc.get_handlers(unbound) == []
    assoc.release()
    while scp.active_associations:
        time.sleep(0.05)
    # Exactly one fully-populated event should have been captured.
    assert len(captured) == 1
    event = captured[0]
    assert isinstance(event, Event)
    assert isinstance(event.assoc, Association)
    assert isinstance(event.timestamp, datetime)
    assert event.event.name == 'EVT_ESTABLISHED'
    scp.shutdown()
def test_established_raises(self, caplog):
    """Test the handler for EVT_ESTABLISHED raising an exception.

    The exception must be caught and logged by pynetdicom, not propagated.
    """
    def handle(event):
        raise NotImplementedError("Exception description")
    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    handlers = [(evt.EVT_ESTABLISHED, handle)]
    scp = ae.start_server(('', 11112), block=False)
    with caplog.at_level(logging.ERROR, logger='pynetdicom'):
        # EVT_ESTABLISHED fires during associate(), running the handler.
        assoc = ae.associate('localhost', 11112, evt_handlers=handlers)
        assert assoc.is_established
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)
        scp.shutdown()
        msg = (
            "Exception raised in user's 'evt.EVT_ESTABLISHED' event handler"
            " 'handle'"
        )
        assert msg in caplog.text
        assert "Exception description" in caplog.text
def test_requested(self):
    """Test starting with handler bound to EVT_REQUESTED."""
    captured = []

    def handle(event):
        captured.append(event)

    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    scp = ae.start_server(('', 11112), block=False)
    assoc = ae.associate(
        'localhost', 11112, evt_handlers=[(evt.EVT_REQUESTED, handle)]
    )
    assert assoc.is_established
    assert len(scp.active_associations) == 1
    # Only EVT_REQUESTED should have a bound handler.
    assert assoc.get_handlers(evt.EVT_REQUESTED) == [(handle, None)]
    for unbound in (evt.EVT_ABORTED, evt.EVT_ACCEPTED, evt.EVT_ESTABLISHED,
                    evt.EVT_REJECTED, evt.EVT_RELEASED):
        assert assoc.get_handlers(unbound) == []
    assoc.release()
    while scp.active_associations:
        time.sleep(0.05)
    # Exactly one fully-populated event should have been captured.
    assert len(captured) == 1
    event = captured[0]
    assert isinstance(event, Event)
    assert isinstance(event.assoc, Association)
    assert isinstance(event.timestamp, datetime)
    assert event.event.name == 'EVT_REQUESTED'
    scp.shutdown()
def test_requested_raises(self, caplog):
    """Test the handler for EVT_REQUESTED raising an exception.

    The exception must be caught and logged by pynetdicom, not propagated.
    """
    def handle(event):
        raise NotImplementedError("Exception description")
    self.ae = ae = AE()
    ae.add_supported_context(VerificationSOPClass)
    ae.add_requested_context(VerificationSOPClass)
    handlers = [(evt.EVT_REQUESTED, handle)]
    scp = ae.start_server(('', 11112), block=False)
    with caplog.at_level(logging.ERROR, logger='pynetdicom'):
        # EVT_REQUESTED fires while the request is sent during associate().
        assoc = ae.associate('localhost', 11112, evt_handlers=handlers)
        assert assoc.is_established
        assoc.release()
        while scp.active_associations:
            time.sleep(0.05)
        scp.shutdown()
        msg = (
            "Exception raised in user's 'evt.EVT_REQUESTED' event handler"
            " 'handle'"
        )
        assert msg in caplog.text
        assert "Exception description" in caplog.text
def test_rejected(self):
"""Test starting with handler bound to EVT_REJECTED."""
triggered = []
def handle(event):
triggered.append(event)
self.ae = ae = AE()
ae.require_called_aet = True
ae.add_supported_context(CTImageStorage)
ae.add_requested_context(VerificationSOPClass)
handlers = [(evt.EVT_REJECTED, handle)]
scp = ae.start_server(('', 11112), block=False)
assoc = ae.associate('localhost', 11112, evt_handlers=handlers)
assert assoc.is_rejected
assert assoc.get_handlers(evt.EVT_ABORTED) == []
assert assoc.get_handlers(evt.EVT_ACCEPTED) == []
assert assoc.get_handlers(evt.EVT_ESTABLISHED) == []
assert assoc.get_handlers(evt.EVT_REJECTED) == [(handle, None)]
assert assoc.get_handlers(evt.EVT_RELEASED) == []
| |
need any diffs
if not self.produce_diffs:
return {}
result = {}
for col in self.diff_generator_data:
# Only process attributes for which out it set.
attname = col['out']
incol = (col['in'] if 'in' in col and col['in'] else attname)
# if diff is False = copy the attribute as is.
if 'diff' in col and col['diff'] is False:
result[attname] = (cur[incol] if incol in cur else None)
elif 'fn' in col:
# if diff is True and fn is supplied - apply it to the current and previous row.
result[attname] = (col['fn'](incol, cur, prev) if cur.get(incol, None) is not None and prev.get(incol,
None) is not None else None)
else:
# default case - calculate the diff between the current attribute's values of
# old and new rows and divide it by the time interval passed between measurements.
result[attname] = ((cur[incol] - prev[incol]) / self.diff_time if cur.get(incol, None) is not None and
prev.get(incol, None) is not None and self.diff_time >= 0 else None)
return result
def _produce_output_row(self, row):
    """Produce the output row for the screen, json or the database from a
    diff row: rename each column and cook/round its value.
    """
    return {
        self._produce_output_name(col): self._produce_output_value(row, col)
        for col in self.output_transform_data
    }
@staticmethod
def _produce_output_value(row, col, method=OUTPUT_METHOD.console):
    """Cook a single output value: fetch it, apply 'fn', then round.

    `method` is currently unused in the body; kept for signature
    compatibility with callers that pass the output method through.
    """
    # Prefer the explicit input column name; fall back to the output name.
    key = col['in'] if 'in' in col else col['out']
    val = row.get(key, None)
    # Optional per-column conversion function.
    if 'fn' in col and val is not None:
        val = col['fn'](val)
    # Optional rounding to the configured number of decimal places.
    if 'round' in col and val is not None:
        val = round(val, col['round'])
    return val
def _produce_output_name(self, col):
    """Return the column's display name, with units appended when shown."""
    name = col['out']
    if self.show_units and 'units' in col:
        name = name + ' ' + col['units']
    return name
@staticmethod
def _calculate_output_status(row, col, val, method):
    """ Examine the current status indicators and produce the status
        value for the specific column of the given row
    """
    # Key -1 means "the whole value"; per-word statuses are keyed by the
    # word's index within the split value.
    st = {-1: COLSTATUS.cs_ok}
    # if value is missing - don't bother calculating anything
    if val is None:
        return st
    if 'status_fn' in col:
        # The column supplies its own status calculation.
        st = col['status_fn'](row, col)
        if len(st) == 0:
            st = {-1: COLSTATUS.cs_ok}
    else:
        # Compare every whitespace-separated word against the column's
        # thresholds; 'critical' is checked before 'warning'.
        words = str(val).split()
        for i, word in enumerate(words):
            for st_name, st_status in zip(('critical', 'warning'), (COLSTATUS.cs_critical, COLSTATUS.cs_warning)):
                if st_name in col:
                    typ = type(col[st_name])
                    # Compare against int thresholds as floats so numeric
                    # words like '1.5' still parse.
                    if typ == int:
                        typ = float
                    # NOTE(review): typ(word) raises ValueError for
                    # non-numeric words - presumably thresholds are only set
                    # on numeric columns; confirm against the column configs.
                    if typ(word) >= col[st_name]:
                        st[i] = st_status
                        break
            if i not in st:
                st[i] = COLSTATUS.cs_ok
    return st
def _get_columns_to_hide(self, result_rows, status_rows):
    """ scan the (cooked) rows, do not show columns that are empty """
    to_skip = []
    for col in self.output_transform_data:
        # Columns pinned with pos == -1 are never hidden.
        if col.get('pos') == -1:
            continue
        attname = self._produce_output_name(col)
        # Hide the column if every cooked value is the empty string.
        empty = True
        for r in result_rows:
            if r[attname].value != '':
                empty = False
                break
        if empty:
            to_skip.append(attname)
        elif col.get('hide_if_ok', False):
            # Optionally hide a non-empty column when every status entry
            # for it is cs_ok across all rows.
            status_ok = True
            for row in status_rows:
                if attname in row and row[attname]:
                    for cl in row[attname]:
                        if row[attname][cl] != COLSTATUS.cs_ok:
                            status_ok = False
                            break
                if not status_ok:
                    break
            if status_ok:
                to_skip.append(attname)
    return to_skip
def _transform_input(self, x, custom_transformation_data=None):
    """Dispatch *x* to the matching transformation routine by its type."""
    if isinstance(x, (list, tuple)):
        return self._transform_list(x, custom_transformation_data)
    if isinstance(x, dict):
        return self._transform_dict(x, custom_transformation_data)
    if isinstance(x, str):
        return self._transform_string(x)
    raise Exception('transformation of data type {0} is not supported'.format(type(x)))
# The following 2 functions are almost the same. The only difference is the
# behavior in case 'in' is not specified: the _dict version assumes the in
# column is the same as the out one, the list emits the warning and skips
# the column.
def _transform_list(self, x, custom_transformation_data=None):
    """Transform a list/tuple row into an attribute dict.

    Input positions come from each column's 'in' index; out-of-range
    indices yield None (with a warning for non-optional columns).
    """
    result = {}
    # choose between the 'embedded' and external transformations
    if custom_transformation_data is not None:
        transformation_data = custom_transformation_data
    else:
        transformation_data = self.transform_list_data
    if transformation_data is not None:
        total = len(x)
        for col in transformation_data:
            # set the output column name
            attname = col['out']
            if 'infn' in col:
                # 'infn' computes the value from the whole row, possibly
                # combining several input columns.
                if len(x) > 0:
                    result[attname] = col['infn'](attname, x, 'optional' in col and col['optional'])
                else:
                    result[attname] = None
            else:
                incol = col['in']
                # get the column from which the value is extracted
                if incol > total - 1:
                    result[attname] = None
                    # complain on optional columns, but only if the list to transform has any data
                    # we want to catch cases when the data collectors (i.e. df, du) doesn't deliver
                    # the result in the format we ask them to, but, on the other hand, if there is
                    # nothing at all from them - then the problem is elsewhere and there is no need
                    # to bleat here for each missing column.
                    if not col.get('optional', False) and len(x) > 0:
                        self.warn_non_optional_column(incol)
                else:
                    result[attname] = x[incol]
            # if transformation function is supplied - apply it to the input data.
            if 'fn' in col and result[attname] is not None:
                result[attname] = col['fn'](result[attname])
        return result
    raise Exception('No data for the list transformation supplied')
# Most of the functionality is the same as in the list transforming function above.
def _transform_dict(self, x, custom_transformation_data=None):
    """Transform a dict row into an attribute dict.

    Unlike the list version, a missing 'in' key defaults the input column
    name to the output one (via _get_input_column_name).
    """
    result = {}
    # choose between the 'embedded' and external transformations
    if custom_transformation_data is not None:
        transformation_data = custom_transformation_data
    else:
        transformation_data = self.transform_dict_data
    if transformation_data:
        for col in transformation_data:
            attname = col['out']
            # if input column name is not supplied - assume it's the same as an output one.
            incol = self._get_input_column_name(col)
            # if infn is supplied - it calculates the column value possbily using other values
            # in the row - we don't use incoming column in this case.
            if 'infn' in col:
                if len(x) > 0:
                    result[attname] = col['infn'](attname, x, 'optional' in col and col['optional'])
                else:
                    result[attname] = None
            elif incol not in x:
                # if the column is marked as optional and it's not present in the output data
                # set None instead
                result[attname] = None
                # see the comment at _transform_list on why we do complain here.
                if not col.get('optional', False) and len(x) > 0:
                    self.warn_non_optional_column(incol)
            else:
                result[attname] = x[incol]
            # optional post-transformation of the extracted value
            if 'fn' in col and result[attname] is not None:
                result[attname] = col['fn'](result[attname])
        return result
    raise Exception('No data for the dict transformation supplied')
@staticmethod
def _transform_string(d):
    """String input transformation is deliberately unsupported.

    Raises:
        NotImplementedError: always.  NotImplementedError (a subclass of
            Exception, so existing ``except Exception`` callers still
            catch it) is the idiomatic signal for an unimplemented path,
            replacing the previous bare ``Exception``.
    """
    raise NotImplementedError('transformation of input type string is not implemented')
def _output_template_for_console(self):
    """Join the per-column format templates into one console template."""
    cells = self._output_row_for_console(None, 't')
    return ' '.join(cells)
def _output_row_for_console(self, row, typ='v'):
    """Render a row ('v'), header ('h') or template ('t') for the console."""
    return self._output_row_generic(row, typ=typ, method=OUTPUT_METHOD.console)
def _output_row_for_curses(self, row, typ='v'):
    """Render a row ('v'), header ('h') or template ('t') for curses."""
    return self._output_row_generic(row, typ=typ, method=OUTPUT_METHOD.curses)
def _output_row_generic(self, row, typ='v', method=OUTPUT_METHOD.console):
    """Produce a single output row of the requested type.

    Args:
        row: the cooked data row ('v' only; may be None otherwise).
        typ: 't' - template row, 'h' - header row (names only),
             'v' - values row.
        method: output method, forwarded to the value producer.

    Returns:
        list of cells (format placeholders, names or values).
    """
    vals = []
    # produce the output row column by column
    for i, col in enumerate(self.output_transform_data):
        if typ == 't':
            # Template cell: a str.format placeholder, left-aligned and
            # width-padded when the column width 'w' is known.
            if 'w' not in col:
                val = '{{{0}}}'.format(i)
            else:
                val = '{{{0}:<{1}}}'.format(i, col['w'])
        elif typ == 'h':
            val = self._produce_output_name(col)
        else:
            val = self._produce_output_value(row, col, method)
        vals.append(val)
    # BUG FIX: the original ended with ``if 'typ' != 'v':`` - a comparison
    # of two string literals that is always True - and both branches
    # returned the same list anyway.  A single return is the intent.
    return vals
def console_output(self, rows, before_string=None, after_string=None):
    """ Main entry point for preparing textual console output """
    # Column widths must be computed from the data before the template
    # can be built.
    self._calculate_dynamic_width(rows)
    template = self._output_template_for_console()
    header = self._output_row_for_console(None, 'h')
    lines = []
    if before_string:
        lines.append(before_string)
    lines.append(template.format(*header))
    lines.extend(template.format(*self._output_row_for_console(r, 'v')) for r in rows)
    if after_string:
        lines.append(after_string)
    return '\n'.join(lines)
def _calculate_dynamic_width(self, rows, method=OUTPUT_METHOD.console):
""" Examine values in all rows and get the width dynamically """
for col in self.output_transform_data:
minw = col.get('minw', 0)
attname = self._produce_output_name(col)
# XXX: if append_column_header, min width should include the size of the attribut name
if method == OUTPUT_METHOD.curses and self.ncurses_custom_fields.get('prepend_column_headers'):
minw += len(attname) + 1
col['w'] = len(attname)
# use cooked values
for row in rows:
if method == OUTPUT_METHOD.curses | |
label='IMF 1 with 11 knots')
# Overlay IMFs extracted with different knot counts on the zoomed-in axis.
axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots')
axs[2].plot(time, imfs_51[3, :], label='IMF 3 with 51 knots')
for knot in knots_11:
    axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
# Re-plot the final knot (loop variable leaks) with a label so 'Knots'
# appears exactly once in the legend.
axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[2].set_xticks([np.pi, (3 / 2) * np.pi])
axs[2].set_xticklabels([r'$\pi$', r'$\frac{3}{2}\pi$'])
# Shrink the axis to make room for the legend outside the plot area.
box_2 = axs[2].get_position()
axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height])
axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[2].set_ylim(-5.5, 5.5)
axs[2].set_xlim(0.95 * np.pi, 1.55 * np.pi)
plt.savefig('jss_figures/DFA_different_trends_zoomed.png')
plt.show()
# Gaussian-filtered Hilbert spectrum of the 51-knot decomposition (plot 6c).
# (Also fixed the misspelled local 'hs_ouputs' -> 'hs_outputs'; it is
# defined and consumed only within this plotting section.)
hs_outputs = hilbert_spectrum(time, imfs_51, hts_51, ifs_51, max_frequency=12, plot=False)
ax = plt.subplot(111)
figure_size = plt.gcf().get_size_inches()
factor = 0.9
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
# Fixed title typo: 'Time Seres' -> 'Time Series'.
plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Simple Sinusoidal Time Series with Added Noise', 50))
x_hs, y, z = hs_outputs
z_min, z_max = 0, np.abs(z).max()
ax.pcolormesh(x_hs, y, np.abs(z), cmap='gist_rainbow', vmin=z_min, vmax=z_max)
# BUG FIX: 'linewidth' must be lower-case - Matplotlib 3.5 removed the old
# case-insensitive property aliases, so 'Linewidth=3' now raises an error.
ax.plot(x_hs[0, :], 8 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 8$', linewidth=3)
ax.plot(x_hs[0, :], 4 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 4$', linewidth=3)
ax.plot(x_hs[0, :], 2 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 2$', linewidth=3)
ax.set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi])
ax.set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$'])
plt.ylabel(r'Frequency (rad.s$^{-1}$)')
plt.xlabel('Time (s)')
box_0 = ax.get_position()
ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.85, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/DFA_hilbert_spectrum.png')
plt.show()
# Schoenberg-Whitney conditions demonstration plot.
time = np.linspace(0, 5 * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
knots = np.linspace(0, 5 * np.pi, 51)
fluc = Fluctuation(time=time, time_series=time_series)
# Maxima/minima envelopes, both with and without smoothing.
max_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='maxima', smooth=False)
max_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='maxima', smooth=True)
min_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='minima', smooth=False)
min_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='minima', smooth=True)
util = Utility(time=time, time_series=time_series)
maxima = util.max_bool_func_1st_order_fd()
minima = util.min_bool_func_1st_order_fd()
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title(textwrap.fill('Plot Demonstrating Unsmoothed Extrema Envelopes if Schoenberg–Whitney Conditions are Not Satisfied', 50))
# BUG FIX: 'linewidth' must be lower-case - Matplotlib 3.5 removed the old
# case-insensitive property aliases, so 'LineWidth=2' now raises an error.
plt.plot(time, time_series, label='Time series', zorder=2, linewidth=2)
plt.scatter(time[maxima], time_series[maxima], c='r', label='Maxima', zorder=10)
plt.scatter(time[minima], time_series[minima], c='b', label='Minima', zorder=10)
plt.plot(time, max_unsmoothed[0], label=textwrap.fill('Unsmoothed maxima envelope', 10), c='darkorange')
plt.plot(time, max_smoothed[0], label=textwrap.fill('Smoothed maxima envelope', 10), c='red')
plt.plot(time, min_unsmoothed[0], label=textwrap.fill('Unsmoothed minima envelope', 10), c='cyan')
plt.plot(time, min_smoothed[0], label=textwrap.fill('Smoothed minima envelope', 10), c='blue')
# Draw all knots; only the last one carries the legend label.
for knot in knots[:-1]:
    plt.plot(knot * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', zorder=1)
plt.plot(knots[-1] * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', label='Knots', zorder=1)
plt.xticks((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi),
           (r'$0$', r'$\pi$', r'2$\pi$', r'3$\pi$', r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
plt.xlim(-0.25 * np.pi, 5.25 * np.pi)
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/Schoenberg_Whitney_Conditions.png')
plt.show()
# plot 7
# Detrended fluctuation analysis example: build every envelope/mean variant
# on a two-tone cosine signal.
a = 0.25
width = 0.2
time = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 1001)
knots = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 11)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
# Locate extrema and inflection points of the raw signal.
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x = time[max_bool]
maxima_y = time_series[max_bool]
min_bool = utils.min_bool_func_1st_order_fd()
minima_x = time[min_bool]
minima_y = time_series[min_bool]
inflection_bool = utils.inflection_point()
inflection_x = time[inflection_bool]
inflection_y = time_series[inflection_bool]
fluctuation = emd_mean.Fluctuation(time=time, time_series=time_series)
# Plain (EMD) and smoothed (SEMD) extrema envelopes.
maxima_envelope = fluctuation.envelope_basis_function_approximation(knots, 'maxima', smooth=False,
                                                                    smoothing_penalty=0.2, edge_effect='none',
                                                                    spline_method='b_spline')[0]
maxima_envelope_smooth = fluctuation.envelope_basis_function_approximation(knots, 'maxima', smooth=True,
                                                                           smoothing_penalty=0.2, edge_effect='none',
                                                                           spline_method='b_spline')[0]
minima_envelope = fluctuation.envelope_basis_function_approximation(knots, 'minima', smooth=False,
                                                                    smoothing_penalty=0.2, edge_effect='none',
                                                                    spline_method='b_spline')[0]
minima_envelope_smooth = fluctuation.envelope_basis_function_approximation(knots, 'minima', smooth=True,
                                                                           smoothing_penalty=0.2, edge_effect='none',
                                                                           spline_method='b_spline')[0]
# Direct detrended-fluctuation estimates of the local mean.
inflection_points_envelope = fluctuation.direct_detrended_fluctuation_estimation(knots,
                                                                                 smooth=True,
                                                                                 smoothing_penalty=0.2,
                                                                                 technique='inflection_points')[0]
binomial_points_envelope = fluctuation.direct_detrended_fluctuation_estimation(knots,
                                                                               smooth=True,
                                                                               smoothing_penalty=0.2,
                                                                               technique='binomial_average', order=21,
                                                                               increment=20)[0]
derivative_of_lsq = utils.derivative_forward_diff()
derivative_time = time[:-1]
derivative_knots = np.linspace(knots[0], knots[-1], 31)
# change (1) detrended_fluctuation_technique and (2) max_internal_iter and (3) debug (confusing with external debugging)
emd = AdvEMDpy.EMD(time=derivative_time, time_series=derivative_of_lsq)
imf_1_of_derivative = emd.empirical_mode_decomposition(knots=derivative_knots,
                                                       knot_time=derivative_time, text=False, verbose=False)[0][1, :]
utils = emd_utils.Utility(time=time[:-1], time_series=imf_1_of_derivative)
# "Optimal" extrema: zero crossings of the derivative IMF, classified by the
# sign of the second difference (falling -> maximum, rising -> minimum).
optimal_maxima = np.r_[False, utils.derivative_forward_diff() < 0, False] & \
                 np.r_[utils.zero_crossing() == 1, False]
optimal_minima = np.r_[False, utils.derivative_forward_diff() > 0, False] & \
                 np.r_[utils.zero_crossing() == 1, False]
# Envelopes through the optimal (EEMD) extrema.
EEMD_maxima_envelope = fluctuation.envelope_basis_function_approximation_fixed_points(knots, 'maxima',
                                                                                      optimal_maxima,
                                                                                      optimal_minima,
                                                                                      smooth=False,
                                                                                      smoothing_penalty=0.2,
                                                                                      edge_effect='none')[0]
EEMD_minima_envelope = fluctuation.envelope_basis_function_approximation_fixed_points(knots, 'minima',
                                                                                      optimal_maxima,
                                                                                      optimal_minima,
                                                                                      smooth=False,
                                                                                      smoothing_penalty=0.2,
                                                                                      edge_effect='none')[0]
# Render every envelope/mean variant computed above on one axis.
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('Detrended Fluctuation Analysis Examples')
# BUG FIX: 'linewidth' must be lower-case - Matplotlib 3.5 removed the old
# case-insensitive property aliases, so 'LineWidth=2' now raises an error.
plt.plot(time, time_series, linewidth=2, label='Time series')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(time[optimal_maxima], time_series[optimal_maxima], c='darkred', zorder=4,
            label=textwrap.fill('Optimal maxima', 10))
plt.scatter(time[optimal_minima], time_series[optimal_minima], c='darkblue', zorder=4,
            label=textwrap.fill('Optimal minima', 10))
plt.scatter(inflection_x, inflection_y, c='magenta', zorder=4, label=textwrap.fill('Inflection points', 10))
# Each envelope pair and its midline share one colour and one legend entry.
plt.plot(time, maxima_envelope, c='darkblue', label=textwrap.fill('EMD envelope', 10))
plt.plot(time, minima_envelope, c='darkblue')
plt.plot(time, (maxima_envelope + minima_envelope) / 2, c='darkblue')
plt.plot(time, maxima_envelope_smooth, c='darkred', label=textwrap.fill('SEMD envelope', 10))
plt.plot(time, minima_envelope_smooth, c='darkred')
plt.plot(time, (maxima_envelope_smooth + minima_envelope_smooth) / 2, c='darkred')
plt.plot(time, EEMD_maxima_envelope, c='darkgreen', label=textwrap.fill('EEMD envelope', 10))
plt.plot(time, EEMD_minima_envelope, c='darkgreen')
plt.plot(time, (EEMD_maxima_envelope + EEMD_minima_envelope) / 2, c='darkgreen')
plt.plot(time, inflection_points_envelope, c='darkorange', label=textwrap.fill('Inflection point envelope', 10))
plt.plot(time, binomial_points_envelope, c='deeppink', label=textwrap.fill('Binomial average envelope', 10))
plt.plot(time, np.cos(time), c='black', label='True mean')
plt.xticks((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi), (r'$0$', r'$\pi$', r'2$\pi$', r'3$\pi$',
                                                                        r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
plt.xlim(-0.25 * np.pi, 5.25 * np.pi)
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/detrended_fluctuation_analysis.png')
plt.show()
# Duffing Equation Example
def duffing_equation(xy, ts, gamma=0.1, epsilon=1, omega=(2 * np.pi) / 25):
    """Right-hand side of the driven Duffing oscillator, for scipy.integrate.odeint.

        dx/dt = v
        dv/dt = x - epsilon * x**3 + gamma * cos(omega * t)

    :param xy: state vector [x, v]
    :param ts: time
    :param gamma: driving amplitude (default 0.1, as used in the figure)
    :param epsilon: cubic stiffness coefficient (default 1)
    :param omega: driving angular frequency (default 2*pi/25)
    :return: [dx/dt, dv/dt]
    """
    # The constants were hard-coded; exposing them as keyword defaults keeps the
    # odeint call below working unchanged while allowing reuse.
    return [xy[1], xy[0] - epsilon * xy[0] ** 3 + gamma * np.cos(omega * ts)]
# Integrate the Duffing equation over 150 time units.
t = np.linspace(0, 150, 1501)
XY0 = [1, 1]  # initial [displacement, velocity]
solution = odeint(duffing_equation, XY0, t)
x = solution[:, 0]
dxdt = solution[:, 1]
x_points = [0, 50, 100, 150]
# BUGFIX: this was a set literal; sets have no guaranteed iteration order, so
# the tick labels could come out shuffled relative to x_points. Use a tuple.
x_names = (0, 50, 100, 150)
y_points_1 = [-2, 0, 2]
y_points_2 = [-1, 0, 1]
fig, axs = plt.subplots(2, 1)
plt.subplots_adjust(hspace=0.2)
axs[0].plot(t, x)
axs[0].set_title('Duffing Equation Displacement')
axs[0].set_ylim([-2, 2])
axs[0].set_xlim([0, 150])
axs[1].plot(t, dxdt)
axs[1].set_title('Duffing Equation Velocity')
axs[1].set_ylim([-1.5, 1.5])
axs[1].set_xlim([0, 150])
# enumerate() replaces the manual 'axis' counter.
for axis, ax in enumerate(axs.flat):
    ax.label_outer()
    if axis == 0:
        ax.set_ylabel('x(t)')
        ax.set_yticks(y_points_1)
    if axis == 1:
        ax.set_ylabel(r'$ \dfrac{dx(t)}{dt} $')
        ax.set(xlabel='t')
        ax.set_yticks(y_points_2)
    ax.set_xticks(x_points)
    ax.set_xticklabels(x_names)
plt.savefig('jss_figures/Duffing_equation.png')
plt.show()
# compare other packages Duffing - top
pyemd = pyemd0215()
py_emd = pyemd(x)
# Hilbert spectrum of the PyEMD decomposition (sampling frequency 10 Hz).
IP, IF, IA = emd040.spectra.frequency_transform(py_emd.T, 10, 'hilbert')
freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 0.2, 100)
hht = emd040.spectra.hilberthuang(IF, IA, freq_edges)
hht = gaussian_filter(hht, sigma=1)
ax = plt.subplot(111)
figure_size = plt.gcf().get_size_inches()
factor = 1.0
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using PyEMD 0.2.10', 40))
plt.pcolormesh(t, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht))))
plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15))
plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15))
plt.xticks([0, 50, 100, 150])
plt.yticks([0, 0.1, 0.2])
plt.ylabel('Frequency (Hz)')
plt.xlabel('Time (s)')
box_0 = ax.get_position()
ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/Duffing_equation_ht_pyemd.png')
plt.show()
# (A duplicated, redundant second plt.show() call was removed here.)
# Same spectrum computed with the 'emd' package's sift.
emd_sift = emd040.sift.sift(x)
IP, IF, IA = emd040.spectra.frequency_transform(emd_sift, 10, 'hilbert')
freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 0.2, 100)
hht = emd040.spectra.hilberthuang(IF, IA, freq_edges)
hht = gaussian_filter(hht, sigma=1)
ax = plt.subplot(111)
figure_size = plt.gcf().get_size_inches()
factor = 1.0
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using emd 0.3.3', 40))
plt.pcolormesh(t, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht))))
plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15))
plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15))
plt.xticks([0, 50, 100, 150])
plt.yticks([0, 0.1, 0.2])
plt.ylabel('Frequency (Hz)')
plt.xlabel('Time (s)')
box_0 = ax.get_position()
ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/Duffing_equation_ht_emd.png')
plt.show()
# compare other packages Duffing - bottom
emd_duffing = AdvEMDpy.EMD(time=t, time_series=x)
emd_duff, emd_ht_duff, emd_if_duff, _, _, _, _ = emd_duffing.empirical_mode_decomposition(verbose=False)
fig, axs = plt.subplots(2, 1)
plt.subplots_adjust(hspace=0.3)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
axs[0].plot(t, emd_duff[1, :], label='AdvEMDpy')
axs[0].plot(t, py_emd[0, :], '--', label='PyEMD 0.2.10')
axs[0].plot(t, emd_sift[:, 0], '--', label='emd 0.3.3')
axs[0].set_title('IMF 1')
axs[0].set_ylim([-2, 2])
axs[0].set_xlim([0, 150])
axs[1].plot(t, emd_duff[2, :], label='AdvEMDpy')
print(f'AdvEMDpy driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - emd_duff[2, :])), 3)}')
axs[1].plot(t, py_emd[1, :], '--', label='PyEMD 0.2.10')
print(f'PyEMD driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - py_emd[1, :])), 3)}')
axs[1].plot(t, emd_sift[:, 1], '--', label='emd 0.3.3')
print(f'emd driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - emd_sift[:, 1])), 3)}')
axs[1].plot(t, 0.1 * np.cos(0.04 * 2 * np.pi * t), '--', label=r'$0.1$cos$(0.08{\pi}t)$')
axs[1].set_title('IMF 2')
axs[1].set_ylim([-0.2, 0.4])
axs[1].set_xlim([0, 150])
# enumerate() replaces the manual 'axis' counter.
for axis, ax in enumerate(axs.flat):
    ax.label_outer()
    if axis == 0:
        ax.set_ylabel(r'$\gamma_1(t)$')
        ax.set_yticks([-2, 0, 2])
    if axis == 1:
        ax.set_ylabel(r'$\gamma_2(t)$')
        ax.set_yticks([-0.2, 0, 0.2])
    box_0 = ax.get_position()
    ax.set_position([box_0.x0, box_0.y0, box_0.width * 0.85, box_0.height])
    ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
    ax.set_xticks(x_points)
    ax.set_xticklabels(x_names)
plt.savefig('jss_figures/Duffing_equation_imfs.png')
plt.show()
# Hilbert spectrum of the AdvEMDpy decomposition (variable name typo fixed:
# 'hs_ouputs' -> 'hs_outputs'; it was only used in the two lines below).
hs_outputs = hilbert_spectrum(t, emd_duff, emd_ht_duff, emd_if_duff, max_frequency=1.3, plot=False)
ax = plt.subplot(111)
plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using AdvEMDpy', 40))
x, y, z = hs_outputs
y = y / (2 * np.pi)
z_min, z_max = 0, np.abs(z).max()
figure_size = plt.gcf().get_size_inches()
factor = 1.0
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
ax.pcolormesh(x, y, np.abs(z), cmap='gist_rainbow', vmin=z_min, vmax=z_max)
plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15))
plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15))
plt.xticks([0, 50, 100, 150])
plt.yticks([0, 0.1, 0.2])
plt.ylabel('Frequency (Hz)')
plt.xlabel('Time (s)')
box_0 = ax.get_position()
ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/Duffing_equation_ht.png')
plt.show()
# Carbon Dioxide Concentration Example
CO2_data = pd.read_csv('Data/co2_mm_mlo.csv', header=51)
# NOTE(review): the column named 'decimal date' is treated as the CO2
# concentration (ppm) and 'month' as the time axis throughout this section -
# the names look swapped relative to their usual meaning; verify against the
# CSV header row used above (header=51).
plt.plot(CO2_data['month'], CO2_data['decimal date'])
plt.title(textwrap.fill('Mean Monthly Concentration of Carbon Dioxide in the Atmosphere', 35))
plt.ylabel('Parts per million')
plt.xlabel('Time (years)')
plt.savefig('jss_figures/CO2_concentration.png')
plt.show()
# Signal = concentration column, time axis = 'month' column (see note above).
signal = CO2_data['decimal date']
signal = np.asarray(signal)
time = CO2_data['month']
time = np.asarray(time)
# compare other packages Carbon Dioxide - top
pyemd = pyemd0215()
py_emd = pyemd(signal)
# Hilbert transform of the first two IMFs; 12 samples per year (monthly data).
IP, IF, IA = emd040.spectra.frequency_transform(py_emd[:2, :].T, 12, 'hilbert')
print(f'PyEMD annual frequency error: {np.round(sum(np.abs(IF[:, 0] - np.ones_like(IF[:, 0]))), 3)}')
freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 2, 100)
hht = emd040.spectra.hilberthuang(IF, IA, freq_edges)
hht = gaussian_filter(hht, sigma=1)
fig, ax = plt.subplots()
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of CO$_{2}$ Concentration using PyEMD 0.2.10', 45))
plt.ylabel('Frequency (year$^{-1}$)')
plt.xlabel('Time (years)')
plt.pcolormesh(time, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht))))
# Reference line at one cycle per year.
plt.plot(time, np.ones_like(time), 'k--', label=textwrap.fill('Annual cycle', 10))
box_0 = ax.get_position()
ax.set_position([box_0.x0 + 0.0125, box_0.y0 + 0.075, box_0.width * 0.8, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/CO2_Hilbert_pyemd.png')
plt.show()
emd_sift = emd040.sift.sift(signal)
IP, | |
str):
lines.append(Line(x=args[i],
y=args[i+1],
format=args[i+2]))
i = i+3
else:
lines.append(Line(y=args[i+1],
x=args[i],
format=''))
i = i+2
# Next element is str --> no x-value
else:
lines.append(Line(y=args[i],
x='auto',
format=args[i+1]))
i = i+2
# These last cases could be run outside the while loop
if i == nargs-2:
# Either y and format or x and y value left
if isinstance(args[i+1], str):
lines.append(Line(y=args[i],
x='auto',
format=args[i+1]))
else:
lines.append(Line(x=args[i],
y=args[i+1],
format=''))
elif i == nargs-1:
# In this case we have only an y value left
lines.append(Line(y=args[i],
x='auto',
format=''))
# add the lines to the axes in ax:
ax.add(lines)
# Set legends
if 'legend' in kwargs:
no_lines = len(lines) # number of lines added
legends = kwargs['legend']
if isinstance(legends, (tuple,list)): # legends is a sequence
if len(legends) == no_lines:
for i in range(no_lines):
legend = legends[no_lines-i-1]
if isinstance(legend, str):
ax.getp('plotitems')[-1-i].setp(legend=legend)
else:
print "Legend "+legend+" is not a string"
else:
print 'Number of legend items (%d) is not equal to '\
'number of lines in plotcommand (%d)' % \
(len(legends), no_lines)
elif isinstance(legends,str): # only one legend
ax.getp('plotitems')[-1].setp(legend=legends)
del kwargs['legend']
if 'legend_loc' in kwargs:
# No test on validity as in legend method...
ax.setp(legend_loc=kwargs['legend_loc'])
if 'legend_fancybox' in kwargs:
ax.setp(legend_fancybox=kwargs['legend_fancybox'])
if not ax.getp('hold') and not 'box' in kwargs:
kwargs['box'] = True
# set keyword arguments in all the added lines
for line in lines:
line.setp(**kwargs)
# automatically add line colors if this is not specified:
if not line.getp('linecolor'):
line.setp(linecolor=ax.get_next_color())
ax.setp(**kwargs)
self.gcf().setp(**kwargs)
self.setp(**kwargs)
if self.getp('interactive') and self.getp('show'):
self._replot()
return lines
def loglog(self, *args, **kwargs):
"""Draw a loglog plot with logarithmic scaling on x- and y-axis.
Calling::
loglog(...)
is the same as calling plot(...) with the exception that a
logarithmic (base 10) scale is used for both x- and y-axes.
"""
kwargs['log'] = 'xy'
return self.plot(*args, **kwargs)
def semilogx(self, *args, **kwargs):
"""Draw a semilog plot with logarithmic scaling on x-axis.
Calling::
semilogx(...)
is the same as calling plot(...) with the exception that a
logarithmic (base 10) scale is used for the x-axis.
"""
kwargs['log'] = 'x'
return self.plot(*args, **kwargs)
def semilogy(self, *args, **kwargs):
"""Draw a semilog plot with logarithmic scaling on y-axis.
Calling::
semilogy(...)
is the same as calling plot(...) with the exception that a
logarithmic (base 10) scale is used for the y-axis.
"""
kwargs['log'] = 'y'
return self.plot(*args, **kwargs)
def plot3(self, *args, **kwargs):
"""Draw lines and points in 3D space.
Calling::
plot3(x, y, z)
plots z against x and y, i.e., if x, y, and z are vectors of length n,
then this will plot all the points (x[i], y[i], z[i]) for 0<=i<n.
Calling::
plot3(z)
plots values in z on the z-axis
(same as plot3(range(len(z)), range(len(z)), z)).
Calling::
plot3(z, fmt)
plots values in z on z-axis formated like fmt (see the plot command).
Calling::
plot3(x1,y1,z1,fmt1,x2,y2,z2,fmt2,...)
same as hold('on') followed by multiple plot3(x,y,z,fmt).
Calling::
plot3(x1,y1,z1,x2,y2,z2,...)
like above, but automatically chooses different colors.
Calling::
plot3(z1,z2,...,x=x,y=y)
uses x as the values on the x-axis and y as the values on the y-axis
for all the supplied curves (assuming that all have the same length).
By setting x='auto' and y='auto' has the same effect as
x=range(len(z1)) and y=range(len(z1)), respectively.
Calling::
plot3(ax, ...)
plots into the Axis object ax instead of the current axis.
The plot3 command returns a list containing all the created Line
objects.
Examples:
>>> t = linspace(0,10*pi,301)
>>> plot3(sin(t), cos(t), t, title='A helix', grid='on')
"""
if not 'description' in kwargs:
kwargs['description'] = 'plot3: 3D line plot'
if not 'hidden' in kwargs:
kwargs['hidden'] = False
ax, args, nargs = self._check_args(*args)
if nargs == 0:
raise TypeError("plot3: not enough arguments given")
lines = [] # all Line instances are stored here
# If first argument is a format string this will be ignored
# If two format strings are used only the first of them will be used
if 'x' in kwargs and 'y' in kwargs:
if nargs == 1 or (nargs == 2 and isinstance(args[1], str)):
if nargs == 1:
lines.append(Line(x=kwargs['x'],
y=kwargs['y'],
z=args[0],
format=''))
else:
lines.append(Line(x=kwargs['x'],
y=kwargs['y'],
z=args[0],
format=args[1]))
else:
for i in range(len(args)-1):
if not isinstance(args[i], str):
if isinstance(args[i+1], str):
lines.append(Line(x=kwargs['x'],
y=kwargs['y'],
z=args[i],
format=args[1+i]))
else:
lines.append(Line(x=kwargs['x'],
y=kwargs['y'],
z=args[i],
format=''))
if i == nargs-2:
lines.append(Line(x=kwargs['x'],
y=kwargs['y'],
z=args[i+1],
format=''))
# x and y in kwargs are no longer needed:
del kwargs['x']
del kwargs['y']
else: # Normal case
# If an odd number, larger than 2, of non-strings in args are
# between two string arguments, something is wrong.
# If the odd number is one, the argument x='auto' is passed.
i = 0
if nargs in (1,2,3,4):
if not isinstance(args[0], str):
if nargs == 1: # plot3(z)
lines.append(Line(x='auto', y='auto', z=args[0],
format=''))
elif nargs == 2: # plot3(z,fmt)
if isinstance(args[1], str):
lines.append(Line(x='auto', y='auto', z=args[0],
format=args[1]))
elif nargs == 3: # plot3(x,y,z)
if not isinstance(args[2], str):
lines.append(Line(x=args[0], y=args[1], z=args[2],
format=''))
else: # plot(x,y,z,fmt) or plot(z1,fmt1,z2,fmt2)
if not isinstance(args[3], str):
lines.append(Line(x='auto', y='auto', z=args[0],
format=args[1]))
lines.append(Line(x='auto', y='auto', z=args[2],
format=args[3]))
else:
lines.append(Line(x=args[0], y=args[1], z=args[2],
format=args[3]))
i+100 #return
else:
raise ValueError("plot3: cannot plot a formatstring")
while i <= nargs-5:
if not isinstance(args[i], str): # should never be string
# cases:
# 1. plot3(x1,y1,z1,s1, x2,y2,z2,s2, ...)
if not isinstance(args[i+1], str):
if not isinstance(args[i+2], str):
if isinstance(args[i+3], str):
lines.append(Line(x=args[i],
y=args[i+1],
z=args[i+2],
format=args[i+3]))
i += 4
else:
lines.append(Line(x=args[i],
y=args[i+1],
z=args[i+2],
format=''))
i += 3
else: # next element is str --> no x and y values
lines.append(Line(x='auto', y='auto', z=args[i],
format=args[i+1]))
i += 2
# 2. plot3(x1,y1,z1, x2,y2,z2, ...)
# 3. plot3(z1,s1, z2,s2, ...)
if i == nargs-4:
if not isinstance(args[i+1], str):
lines.append(Line(x=args[i],
y=args[i+1],
z=args[i+2],
format=args[i+3]))
else:
lines.append(Line(x='auto', y='auto', z=args[i],
format=args[i+1]))
lines.append(Line(x='auto', y='auto', z=args[i+2],
format=args[i+3]))
elif i == nargs-3: # x, y, and z left
lines.append(Line(x=args[i], y=args[i+1], z=args[i+2],
format=''))
elif i == nargs-2: # only z and format string left
if isinstance(args[i+1], str):
lines.append(Line(x='auto', y='auto', z=args[i],
format=args[i+1]))
elif i == nargs-1: # only a z value left
lines.append(Line(x='auto', y='auto', z=args[i],
format=''))
# add the lines to the axes in ax:
ax.add(lines)
# Set legends
if 'legend' in kwargs:
no_lines = len(lines)
legends = kwargs['legend']
if isinstance(legends, (tuple,list)): # legends is a sequence
if len(legends) == no_lines:
for i in range(no_lines):
legend = legends[no_lines-i-1]
if isinstance(legend, str):
ax.getp('plotitems')[-1-i].setp(legend=legend)
else:
print "Legend "+legend+" is not a string"
else:
print "Number of legend items (%d) is not equal to " \
"number of lines (%d) in plotcommand" % \
(len(legends), no_lines)
elif isinstance(legends,str): # only one legend
ax.getp('plotitems')[-1].setp(legend=legends)
del kwargs['legend']
if not ax.getp('hold') and not 'view' in kwargs:
kwargs['view'] = 3
# set keyword arguments in all the added lines:
for line in lines:
line.setp(**kwargs)
# automatically add line colors if this is not specified:
if not line.getp('linecolor'):
line.setp(linecolor=ax.get_next_color())
ax.setp(**kwargs)
self.gcf().setp(**kwargs)
self.setp(**kwargs)
if self.getp('interactive') and self.getp('show'):
self._replot()
return lines
def fill(self, *args, **kwargs):
"""Draw filled 2D polygons."""
kwargs['description'] = 'fill: filled 2D polygons'
if not 'edgecolor' in kwargs:
kwargs['edgecolor'] = 'k'
return self.plot(*args, **kwargs)
def stem(self, *args, **kwargs):
"""Draw a stem plot."""
kwargs['description'] = 'stem: stem plot'
return self.plot(*args, **kwargs)
def bar(self, *args, **kwargs):
"""Draw a bar graph.
Calling::
bar(data)
where data is a dictionary on the form
data = {'method1': {'thing1': value, 'thing2': value},
'method2': {'thing1': value, 'thing2': value},
'method3': {'thing1': value, 'thing2': value},}
will draw m bars for every name (key in data), one for each key in
data[name], where the height indicates the value. The name is placed
beneath each of the bar groups on the x axis.
Calling::
bar(Y)
will draw a bar for each of the elements in the vector/matrix Y.
If Y is a matrix, a group of bars from the elements of each row of
Y will be created.
Calling::
bar(x,Y)
is the same as above only that the values on the x axis is defined
by the vector x.
Calling::
bar(..., width)
uses the specified width on the bars. The default width is 0.8, while
a width of 1.0 should make the bars just touch each | |
# pycrud/query.py
import dataclasses
import json
from dataclasses import dataclass, field
from typing import List, Union, Set, Dict, Any, Type, Mapping
from typing_extensions import Literal
from pycrud.const import QUERY_OP_COMPARE, QUERY_OP_RELATION, QUERY_OP_FROM_TXT
from pycrud.error import UnknownQueryOperator, InvalidQueryConditionValue, InvalidQueryConditionColumn, \
InvalidOrderSyntax, InvalidQueryConditionOperator
from pycrud.types import RecordMapping, RecordMappingField
class LogicRelation:
    """Placeholder for logical composition of query fields.

    All methods are unimplemented stubs; they accept any arguments and
    return None.
    """
    def __init__(self, *args):
        # Arguments are accepted for API compatibility but not stored yet.
        pass
    def and_(self, *args):
        """Stub for logical AND; currently a no-op returning None."""
        return None
    def or_(self, *args):
        """Stub for logical OR; currently a no-op returning None."""
        return None
class QueryField:
    """Fluent wrapper around a field that accumulates binary conditions.

    NOTE(review): an identical ``QueryField`` class is defined again further
    down in this module; that later definition shadows this one at import
    time, so this copy is effectively dead code - consider removing one.
    """
    def __init__(self, field):
        self.field = field
        self._chains = []  # accumulated [field, op, value] triples
    def binary(self, op: Union[QUERY_OP_COMPARE, QUERY_OP_RELATION], value):
        # check valid
        # TODO(upstream): operator/value validation is not implemented yet.
        self._chains.append([self.field, op, value])
        return self
def f(field=None) -> Union[QueryField, LogicRelation]:
    """Factory shortcut: wrap `field` in a QueryField, or return a bare
    LogicRelation when called without an argument."""
    return LogicRelation() if field is None else QueryField(field)
@dataclass
class SelectExpr:
    """A single selected column, e.g. ``$ta.id``, with an optional output alias."""
    column: Union[RecordMappingField, Any]  # expected to be a RecordMappingField
    alias: str = None  # output name override; None keeps the column name
    @property
    def table_name(self):
        # Name of the RecordMapping table that owns the column.
        return self.column.table.table_name
@dataclass
class SelectExprTree:
    """A named group of select expressions, e.g.::

        $ta: [
            SelectExpr(TableA, 'id')
        ]
    """
    items: List[Union[SelectExpr, 'SelectExprTree']]  # children; trees may nest
    alias: str  # name for the whole group in the output
class QueryField:
    """Fluent wrapper around a field that accumulates binary conditions."""
    def __init__(self, field):
        self.field = field
        self._chains = []  # [field, op, value] triples, in call order
    def binary(self, op: Union[QUERY_OP_COMPARE, QUERY_OP_RELATION], value):
        """Record one (field, op, value) condition and return self for chaining."""
        # TODO(upstream): operator/value validation is not implemented yet.
        condition = [self.field, op, value]
        self._chains.append(condition)
        return self
@dataclass
class QueryOrder:
    """One ORDER BY item: a column plus its sort direction."""
    # NOTE: the explicit __eq__/__repr__ below take precedence over the
    # dataclass-generated ones (dataclass skips members already defined).
    column: RecordMappingField
    order: Union[Literal['asc', 'desc', 'default']] = 'default'
    def __eq__(self, other):
        # Equal only to another QueryOrder with the same column and direction.
        if isinstance(other, QueryOrder):
            return self.column == other.column and self.order == other.order
        return False
    def __repr__(self):
        return '<QueryOrder %r.%s>' % (self.column, self.order)
    @classmethod
    def from_text(cls, table: Type[RecordMapping], text):
        """
        Parse an order clause such as ``order=id.desc, xxx.asc``.
        :param table: RecordMapping subclass whose columns may be referenced
        :param text: order=id.desc, xxx.asc
        :return: [
            [<column>, asc|desc|default],
            [<column2>, asc|desc|default],
        ]
        :raises InvalidOrderSyntax: on malformed items, unknown columns or
            invalid order modes
        """
        orders = []
        for i in map(str.strip, text.split(',')):
            # 'id' -> default order; 'id.desc' -> explicit; more dots -> error.
            items = i.split('.', 2)
            if len(items) == 1:
                column_name, order = items[0], 'default'
            elif len(items) == 2:
                column_name, order = items
            else:
                raise InvalidOrderSyntax("Invalid order syntax")
            column = getattr(table, column_name, None)
            if column is None:
                raise InvalidOrderSyntax('Unknown column: %s' % column_name)
            order = order.lower()
            if order not in ('asc', 'desc', 'default'):
                raise InvalidOrderSyntax('Invalid order mode: %s' % order)
            orders.append(cls(column, order))
        return orders
@dataclass
class UnaryExpr:
    """Base class for unary wrappers around a condition expression."""
    expr: Union['ConditionExpr', 'ConditionLogicExpr', 'UnaryExpr']
@dataclass
class NegatedExpr(UnaryExpr):
    """Logical NOT of the wrapped expression."""
    pass
@dataclass
class ConditionExpr:
    """
    A single binary condition, e.g.:
    $ta:id.eq = 123
    $ta:id.eq = $tb:id
    """
    column: Union[RecordMappingField, Any]  # actual type must be RecordMappingField (enforced in __post_init__)
    op: Union[QUERY_OP_COMPARE, QUERY_OP_RELATION]
    value: Union[RecordMappingField, Any]  # literal value, or another column for join-style conditions
    def __post_init__(self):
        # NOTE(review): assert is stripped under `python -O`; raising an
        # exception here would be safer.
        assert isinstance(self.column, RecordMappingField), 'RecordMappingField excepted, got %s' % type(self.column)
    @property
    def table_name(self) -> str:
        # Name of the table owning the left-hand column.
        return self.column.table.table_name
    def __and__(self, other: Union['ConditionExpr', 'ConditionLogicExpr', 'UnaryExpr']) -> 'ConditionLogicExpr':
        # a & b -> new 'and' node containing both operands.
        return ConditionLogicExpr('and', [self, other])
    def __or__(self, other: Union['ConditionExpr', 'ConditionLogicExpr', 'UnaryExpr']) -> 'ConditionLogicExpr':
        # a | b -> new 'or' node containing both operands.
        return ConditionLogicExpr('or', [self, other])
    def __invert__(self) -> 'NegatedExpr':
        # ~a -> NOT a
        return NegatedExpr(self)
@dataclass
class ConditionLogicExpr:
    """A logical 'and'/'or' node over a list of condition expressions."""
    type: Union[Literal['and'], Literal['or']]
    items: List[Union[ConditionExpr, 'ConditionLogicExpr', 'UnaryExpr']]
    def __and__(self, other: Union['ConditionExpr', 'ConditionLogicExpr', 'UnaryExpr']):
        """a & b: extend this node in place if it is already an 'and' node,
        otherwise wrap both operands in a new 'and' node."""
        if self.type == 'and':
            self.items.append(other)
            return self
        else:
            # BUGFIX: this previously built an 'or' node, inverting the
            # requested logic (mirrors ConditionExpr.__and__).
            return ConditionLogicExpr('and', [self, other])
    def __or__(self, other: Union['ConditionExpr', 'ConditionLogicExpr', 'UnaryExpr']):
        """a | b: extend this node in place if it is already an 'or' node,
        otherwise wrap both operands in a new 'or' node."""
        if self.type == 'or':
            self.items.append(other)
            return self
        else:
            # BUGFIX: this previously built an 'and' node, inverting the
            # requested logic (mirrors ConditionExpr.__or__).
            return ConditionLogicExpr('or', [self, other])
    def __invert__(self) -> 'NegatedExpr':
        # ~a -> NOT a
        return NegatedExpr(self)
@dataclass
class QueryConditions:
    """Top-level list of conditions; items are combined with an implicit AND."""
    items: List[Union[ConditionExpr, 'ConditionLogicExpr', UnaryExpr]]
    @property
    def type(self):
        # Behaves like an 'and' logic node for code that walks the tree uniformly.
        return 'and'
AllExprType = Union[QueryConditions, ConditionLogicExpr, ConditionExpr, NegatedExpr]
def check_same_expr(a: AllExprType, b: AllExprType) -> bool:
    """Structurally compare two condition-expression trees.

    Returns True when `a` and `b` have exactly the same runtime type, shape,
    operators, columns and values; falls back to `==` for plain values.
    """
    # Exact type match is required (e.g. NegatedExpr != UnaryExpr).
    if type(a) != type(b):
        return False
    if isinstance(a, NegatedExpr):
        return check_same_expr(a.expr, b.expr)
    elif isinstance(a, RecordMappingField):
        # Columns are equal when they belong to the same table and share a name.
        return a.table == b.table and a.name == b.name
    elif isinstance(a, ConditionExpr):
        if a.op != b.op:
            return False
        if not check_same_expr(a.column, b.column):
            return False
        if isinstance(a.value, (QueryConditions, ConditionLogicExpr, ConditionExpr, NegatedExpr)):
            return check_same_expr(a.value, b.value)
        else:
            return a.value == b.value
    elif isinstance(a, ConditionLogicExpr):
        if a.type != b.type:
            return False
        if len(a.items) != len(b.items):
            return False
        for i, j in zip(a.items, b.items):
            if not check_same_expr(i, j):
                return False
        return True
    elif isinstance(a, QueryConditions):
        # BUGFIX: compare lengths first - zip() stops at the shorter list, so a
        # strict prefix previously compared as equal (mirrors the branch above).
        if len(a.items) != len(b.items):
            return False
        for i, j in zip(a.items, b.items):
            if not check_same_expr(i, j):
                return False
        return True
    return a == b
@dataclass
class QueryJoinInfo:
    """Describes a join against another table."""
    table: Type[RecordMapping]  # table being joined
    conditions: QueryConditions  # join conditions (implicit AND)
    type: Union[Literal['inner', 'left']] = 'left'  # join kind
    limit: int = -1  # unlimited
@dataclass
class QueryInfo:
    """
    Describes one query: main table, selected columns, conditions, ordering,
    foreign keys, joins and pagination. Simple examples::

        {
            'username.eq': '111',
        }
        {
            '$or': {
                'username.eq': '111',
                'name.ne': '22'
            }
        }

    // Option 1
    // Problems with this option:
    // 1. If external tables may only join against the current table, it is
    //    less expressive than option 2; if the main table may also reference
    //    external tables in its conditions, there is too much freedom and it
    //    becomes easy to write strange statements.
    {
        '$select': 'aa, bb, cc', // selected columns
        '$select-': 'dd, ee, ff', // excluded columns
        '$order-by': 'aa.desc, bb.asc',
        '$foreign-key': {
            'user_info': { // foreign table name
                '$select': ...
                'id.eq': '$src.id'
            },
            'user_info[]': { // foreign table name
                '$select': ...
                'id.eq': '$src.id'
            },
            'session': {
                'id.eq': '$user_info.id' // this must not be allowed
            }
        },
        'time.gt': '$session.time', // inner join not allowed for now
    }
    // Keywords: $select, $select-, $order-by, $foreign-key

    // Option 2
    // Problems with this option:
    // 1. value is sometimes a str and sometimes an expression
    // 2. without restrictions, practically every endpoint gains the ability
    //    to query the whole database
    // 3. distinguishing inner and outer joins gets somewhat complicated
    {
        '$from': 'ta', // implicit, does not need to be written explicitly
        '$from_others': ['tb'], // joined tables
        '$select': ['aa', 'bb', '$tb:cc', '$ta'], // $ta refers to table ta; returns json: {'aa': xx, 'bb': xx, '$tb:cc': xx, '$ta': xxx}
        '$select': {'aa': null, 'bb': null, '$tb:cc': 'cc', '$ta': 'a_info'], // same result as above; the value names override the ones above
        '$id.eq': '$tb:id', // select ... from ta, tb where ta.id = tb.id
        '$time.gt': 1, // select ... from ta, tb where ta.time > 1
        '$tb:cc.eq': '$ta:id', // select ... from ta, tb where tb.cc = ta.id
        '$or': {
            '$user_id.eq': '11',
            '$user_id.eq': '22',
        }
    }
    // Keywords: $select, $select-, $order-by, $foreign-key, $or, $and
    """
    from_table: Type[RecordMapping]  # main table of the query
    select: List[Union[RecordMappingField, Any]] = field(default_factory=lambda: [])  # columns to return
    select_exclude: Set[Union[RecordMappingField, Any]] = None  # columns removed from `select`
    conditions: QueryConditions = None  # WHERE clause (implicit AND); None = no filter
    order_by: List[QueryOrder] = field(default_factory=lambda: [])
    foreign_keys: Dict[str, 'QueryInfo'] = None  # nested queries keyed by relation name
    offset: int = 0  # pagination offset
    limit: int = 20  # pagination page size
    join: List[QueryJoinInfo] = None
    # Presumably columns fetched for internal use but hidden from the output -
    # TODO confirm against the consumers of this field.
    select_hidden: Set[Union[RecordMappingField, Any]] = field(default_factory=lambda: set())
    def __post_init__(self):
        # Cache slot for the lazily computed `select_for_crud` column list.
        self._select = None
    def clone(self):
        # TODO: it's shallow copy
        # dataclasses.replace() re-invokes __init__/__post_init__ with the same
        # field values, so nested structures are shared with the original,
        # not copied.
        return dataclasses.replace(self)
@property
def select_for_crud(self):
if self._select is None:
select = []
for i in self.select:
if self.select_exclude:
if i not in set(self.select_exclude):
select.append(i)
else:
select.append(i)
self._select = select
return self._select
@classmethod
def from_table_raw(cls, table, select=None, where=None, *, select_exclude=None):
get_items = lambda keys: [getattr(table, x) for x in keys]
if select is None:
select = get_items(table.record_fields.keys())
return QueryInfo(
table,
select=select,
select_exclude=select_exclude,
conditions=QueryConditions(where) if where else None
)
@classmethod
def from_json(cls, table: Type[RecordMapping], data, from_http_query=False, check_cond_with_field=False):
assert table, 'table must be exists'
assert issubclass(table, RecordMapping)
get_items = lambda keys: [getattr(table, x) for x in keys]
q = cls(table)
def http_value_try_parse(value):
if from_http_query:
if value == 'null':
value = None
else:
try:
return json.loads(value)
except (TypeError, json.JSONDecodeError):
raise InvalidQueryConditionValue(
'right value must can be unserializable with json.loads')
return value
def parse_select(select_text, unselect_text):
if select_text is None:
selected = get_items(table.record_fields.keys())
else:
selected_columns = list(filter(lambda x: x, map(str.strip, select_text.split(','))))
selected = get_items(selected_columns)
if unselect_text is not None:
unselected_columns = list(filter(lambda x: x, map(str.strip, unselect_text.split(','))))
unselected = set(get_items(unselected_columns))
else:
unselected = None
return selected, unselected
def parse_value(_key, field_name, value, *, is_in=False, is_contains=False):
value = http_value_try_parse(value)
if check_cond_with_field:
if isinstance(value, str) and value.startswith('$'):
if ':' in value:
a, b = value.split(':', 1)
t = RecordMapping.all_mappings.get(a[1:])
try:
return getattr(t, b)
except AttributeError:
raise InvalidQueryConditionValue("column not exists: %s" % value)
else:
raise InvalidQueryConditionValue('invalid value: %s, example: "$user:id"' % value)
model_field = table.__fields__.get(field_name)
if is_in:
assert isinstance(value, List), 'The right value of relation operator must be list'
final_value = []
for i in value:
val, err = model_field.validate(i, None, loc=_key)
if err:
raise InvalidQueryConditionValue('invalid value: %s' % value)
final_value.append(val)
else:
if value is None:
final_value = value
else:
final_value, err = model_field.validate(value, None, loc=_key)
if err:
raise InvalidQueryConditionValue('invalid value: %r' % value)
return final_value
def logic_op_check(key: str, op_prefix: str) -> bool:
if key.startswith(op_prefix):
if len(key) == len(op_prefix):
return True
# allow multi logic expr:
# $and.1, $and.2
return key[len(op_prefix):].isdigit()
return False
def try_get_op(op_raw: str) -> str:
if '.' in op_raw:
a, b = op_raw.split('.', 1)
if b.isdigit():
return a
else:
raise InvalidQueryConditionOperator('unknown operator: %s' % op_raw)
return op_raw
def parse_conditions(data):
conditions = []
for key, value in data.items():
if key.startswith('$'):
if logic_op_check(key, '$or'):
conditions.append(ConditionLogicExpr('or', parse_conditions(value)))
elif logic_op_check(key, '$and'):
conditions.append(ConditionLogicExpr('and', parse_conditions(value)))
elif logic_op_check(key, '$not'):
conditions.append(NegatedExpr(
ConditionLogicExpr('and', parse_conditions(value))
))
elif '.' in key:
field_name, op_name = key.split('.', 1)
op_name = try_get_op(op_name)
op = QUERY_OP_FROM_TXT.get(op_name)
if op is None:
raise UnknownQueryOperator(op_name)
is_in = op in (QUERY_OP_RELATION.IN, QUERY_OP_RELATION.NOT_IN)
is_contains = op in (QUERY_OP_RELATION.CONTAINS, QUERY_OP_RELATION.CONTAINS_ANY)
try:
field_ = getattr(table, field_name)
value = parse_value(key, field_name, value, is_in=is_in, is_contains=is_contains)
if is_contains:
if not isinstance(value, List):
raise InvalidQueryConditionValue('right value of contains should be list: %s' % value)
conditions.append(ConditionExpr(field_, op, value))
except AttributeError:
raise InvalidQueryConditionColumn("column not exists: %s" % field_name)
return conditions
q.select, q.select_exclude = parse_select(data.get('$select'), data.get('$select-'))
q.conditions = QueryConditions(parse_conditions(data))
for key, value in data.items():
if key.startswith('$'):
if key == '$order-by':
q.order_by = QueryOrder.from_text(table, value)
elif key == '$fks' or key == '$foreign-keys':
value = http_value_try_parse(value)
assert isinstance(value, Mapping)
q.foreign_keys = {}
for k, v in value.items():
k2 = k[:-2] if k.endswith('[]') else k
t = table.all_mappings.get(k2)
if t:
q.foreign_keys[k] = cls.from_json(t, v, check_cond_with_field=True)
continue
if '.' | |
# csb/bio/fragments/rosetta.py (repo: ujjwalsh/CSB)
"""
Rosetta fragment libraries.
This module defines the L{RosettaFragmentMap} objects, which describes a
fragment library in Rosetta NNmake format. L{RosettaFragmentMap} has a
static factory method for building a library from a fragment file:
>>> RosettaFragmentMap.read('fragments.txt')
<RosettaFragmentMap>
@note: Consider extracting L{RosettaFragmentMap.read} as a Rosetta
fragment parser which naturally belongs to csb.bio.io.
"""
from csb.bio.structure import TorsionAnglesCollection, TorsionAngles
from csb.core import AbstractContainer
class ResidueInfo(object):
    """
    Container struct for a single rosetta fragment residue.
    @param rank: residue position (in the source chain, 1-based)
    @type rank: int
    @param aa: amino acid
    @type aa: str
    @param ss: secondary structure
    @type ss: str
    @param torsion: torsion angles
    @type torsion: L{csb.bio.structure.TorsionAngles}
    @param calpha: C-alpha coordinates (stored internally as a tuple)
    @type calpha: iterable
    """
    def __init__(self, rank, aa, ss, torsion, calpha=()):
        # The default was a mutable list ([]); an empty tuple avoids the
        # shared-mutable-default pitfall and behaves identically because the
        # value is converted with tuple() below anyway.
        self.rank = rank
        self.aa = aa
        self.ss = ss
        self.torsion = torsion
        self.calpha = tuple(calpha)
    @property
    def phi(self):
        # Missing (None) angles are reported as 0.0.
        return self.torsion.phi or 0.
    @property
    def psi(self):
        return self.torsion.psi or 0.
    @property
    def omega(self):
        return self.torsion.omega or 0.
    def copy(self):
        """
        @return: a deep copy of the struct
        @rtype: L{ResidueInfo}
        """
        return ResidueInfo(self.rank, self.aa, self.ss, self.torsion.copy(), self.calpha)
class RosettaFragment(object):
"""
Represents a single Rosetta fragment match.
@param source_id: entry ID of the source PDB chain (in accnC format)
@type source_id: str
@param qstart: start position in target (rank)
@type qstart: int
@param qend: end position in target (rank)
@type qend: int
@param start: start position in C{source} (rank)
@type start: int
@param end: end position in C{source} (rank)
@type end: int
@param score: score of the fragment
@type score: float
@param residues: fragment residue structs
@type residues: iterable of L{ResidueInfo}
"""
def __init__(self, source_id, qstart, qend, start, end, score, residues):
if not (qend - qstart + 1) == (end - start + 1) == len(residues):
raise ValueError()
if not len(source_id) == 5:
raise ValueError(source_id)
self._source_id = str(source_id)
self._qstart = int(qstart)
self._qend = int(qend)
self._start = int(start)
self._end = int(end)
self._score = float(score)
self._residues = list(residues)
def subregion(self, qstart, qend):
"""
Extract a subregion from the fragment.
@param qstart: start position in target
@type qstart: int
@param qend: end position in target
@type qend: int
@return: a new fragment (deep copy)
@rtype: L{RosettaFragment}
"""
if not self.qstart <= qstart <= qend <= self.qend:
raise ValueError('Invalid subregion')
start = qstart - self.qstart + self.start
end = qend - self.qend + self.end
diff = qstart - self.qstart
size = qend - qstart + 1
assert 0 <= diff
residues = [ r.copy() for r in self.residues[diff : diff + size] ]
assert len(residues) == size
return RosettaFragment(self.source_id, qstart, qend, start, end, self.score, residues)
def __lt__(self, other):
# lower score means a better fragment
return self.score < other.score
def __iter__(self):
return iter(self._residues)
def __len__(self):
return len(self._residues)
def __str__(self):
out = []
for residue in self.residues:
line = ' {0.accession:4} {0.chain:1} {1.rank:>5} {1.aa:1} {1.ss:1} {1.phi:>8.3f} {1.psi:>8.3f} {1.omega:>8.3f} {0.score:>8.3f}'
out.append(line.format(self, residue))
return '\n'.join(out)
@staticmethod
def from_object(assignment):
"""
Factory method: build a rosetta fragment from an assignment object.
@param assignment: source assignment
@type assignment: L{Assignment}
@rtype: L{RosettaFragment}
"""
residues = []
a = assignment
for rank, aa, torsion, calpha in zip(range(a.start, a.end + 1), a.sequence, a.torsion, a.backbone):
residues.append(ResidueInfo(rank, aa, 'L', torsion, calpha))
return RosettaFragment(a.source_id, a.qstart, a.qend, a.start, a.end, 1 - (a.probability or 0.0), residues)
@property
def length(self):
return len(self)
@property
def source_id(self):
return self._source_id
@property
def accession(self):
return self.source_id[:4]
@property
def chain(self):
return self.source_id[4:]
@property
def id(self):
return '{0.source_id}:{0.start}-{0.end}'.format(self)
@property
def qstart(self):
return self._qstart
@property
def qend(self):
return self._qend
@property
def start(self):
return self._start
@property
def end(self):
return self._end
@property
def score(self):
return self._score
@property
def residues(self):
return tuple(self._residues)
@property
def torsion(self):
return TorsionAnglesCollection([r.torsion for r in self._residues], start=0)
class OutputBuilder(object):
"""
Rosetta fragment file formatter.
@param output: destination stream
@type output: file
"""
def __init__(self, output):
self._out = output
@property
def output(self):
return self._out
def add_position(self, qstart, frags):
"""
Write a new assignment origin.
@param qstart: target position
@type qstart: float
@param frags: number of fragments, starting at that position
@type frags: int
"""
self.output.write(' position: {0:>12} neighbors: {1:>12}\n\n'.format(qstart, len(frags)))
def add_fragment(self, fragment):
"""
Write a new fragment.
@type fragment: L{RosettaFragment}
"""
for residue in fragment.residues:
self.add_residue(fragment, residue)
self.output.write('\n')
self.output.write('\n')
def add_residue(self, fragment, residue):
"""
Write a new fragment residue.
@type fragment: L{RosettaFragment}
@type residue: L{ResidueInfo}
"""
line = ' {0.accession:4} {0.chain:1} {1.rank:>5} {1.aa:1} {1.ss:1} {1.phi:>8.3f} {1.psi:>8.3f} {1.omega:>8.3f} {0.score:>8.3f}'
self.output.write(line.format(fragment, residue))
class ExtendedOutputBuilder(OutputBuilder):
"""
Builds non-standard fragment files, which contain the CA coordinates of
each residue at the end of each line.
"""
def add_residue(self, fragment, residue):
super(ExtendedOutputBuilder, self).add_residue(fragment, residue)
if residue.calpha:
calpha = residue.calpha
else:
calpha = [0, 0, 0]
self.output.write(' {0:>7.3f} {1:>7.3f} {2:>7.3f}'.format(*calpha))
class RosettaFragmentMap(AbstractContainer):
"""
Represents a Rosetta fragment library.
@param fragments: library fragments
@type fragments: iterable of L{RosettaFragment}
@param length: target sequence's length. If not defined, the qend of the
last fragment will be used instead.
@type length: int
"""
def __init__(self, fragments, length=None):
self._fragments = []
self._unconf = set()
self._sources = set()
self._starts = set()
self._ends = set()
self._length = None
for f in fragments:
self.append(f)
if length is not None:
assert length >= self._maxend
self._length = int(length)
else:
self._length = self._maxend
@property
def _maxend(self):
return max(self._ends or [0])
def append(self, fragment):
"""
Append a new L{RosettaFragment}
"""
if self._length and fragment.qend > self._length:
raise ValueError('fragment out of range')
self._fragments.append(fragment)
self._sources.add(fragment.accession)
self._starts.add(fragment.qstart)
self._ends.add(fragment.qend)
def __len__(self):
return len(self._fragments)
@property
def _children(self):
return self._fragments
@property
def unconfident_positions(self):
return tuple(sorted(self._unconf))
@property
def size(self):
return len(self)
@property
def sources(self):
return tuple(self._sources)
@property
def start_positions(self):
return tuple(sorted(self._starts))
def fromsource(self, accession):
"""
@return: a tuple of all fragments, extracted from the specified C{source}.
@param accession: source entry ID
@type accession: str
"""
return tuple(f for f in self._fragments if f.accession == accession)
def starting_at(self, qrank):
"""
@return: a tuple of all fragments, starting at the specified target position.
@param qrank: fragment origin (in target, rank)
@type qrank: int
"""
return tuple(f for f in self._fragments if f.qstart == qrank)
def at(self, qrank):
"""
@return: a tuple of all fragments, covering the specified position.
@param qrank: position in target, rank
@type qrank: int
"""
return tuple(f for f in self._fragments if f.qstart <= qrank <= f.qend)
def mark_unconfident(self, rank):
"""
Mark the specified position in the target as a low-confidence one.
@param rank: position in target
@type rank: int
"""
if not 1 <= rank <= self._length:
raise ValueError(rank)
self._unconf.add(rank)
def complement(self, fragment):
"""
Append C{fragment} to the library, if the fragment is anchored
around a low-confidence position.
@type fragment: L{RosettaFragment}
"""
if not self._unconf:
raise ValueError('no unconfident regions to complement')
f = fragment
for rank in self._unconf:
if f.qstart < rank < f.qend:
if (rank - f.qstart + 1) > 0.4 * (f.qend - f.qstart + 1):
self.append(f)
break
def sort(self, field='score', reverse=False):
"""
Sort the fragments in the library.
"""
self._fragments.sort(key=lambda i:getattr(i, field), reverse=reverse)
def dump(self, file, builder=OutputBuilder):
"""
Write the library to a Rosetta fragment file.
@param file: destination file name
@type file: str
"""
with open(file, 'w') as out:
builder = builder(out)
for qstart in self.start_positions:
frags = self.starting_at(qstart)
builder.add_position(qstart, frags)
for fragment in frags:
builder.add_fragment(fragment)
@staticmethod
def read(file, top=None):
"""
Read a standard fragment file.
@param file: file name
@type file: str
@param top: if defined, read only C{top} fragments per start position
(default=all)
@type top: int or None
@return: parsed fragment | |
<filename>Dietscheduler/lib/diet_sched.py
from pulp import *
from collections import defaultdict
import json
import math
import itertools
import datetime
class Menu:
'''class for all menues to be built'''
def __init__(self):
self.status = False
def calculate_menu(self,food_database,groups,exclude_list,kcal,diet,user_database,user,start_date):
self.food_database = food_database
self.groups = groups
self.exclude_list = exclude_list
self.reversed_ingredient_dict = {self.food_database['ingredients'][key]['name']: key for key in list(self.food_database['ingredients'].keys())}
self.reversed_recipe_dict = {self.food_database['recipes'][key]['name']: key for key in list(self.food_database['recipes'].keys())}
self.kcal = kcal
self.diet = diet
self.user_database = user_database
self.user = user
self.menu_dict = {}
self.goal_dict = {}
self.grocery_dict = defaultdict()
self.result_dict = defaultdict()
self.portions_dict = {}
self.days_since_last_usage_dict = {}
recipe_dictionary = self.food_database["recipes"]
ingredient_dictionary = self.food_database["ingredients"]
nutri_goals = {"keto": {
"calorie": {
"sense": 0,
"goal": self.kcal},
"protein": {
"sense": 0,
"goal": 0.20 * self.kcal},
"fat": {
"sense": 0,
"goal": 0.60 * self.kcal},
"carb": {
"sense": 0,
"goal": 0.10 * self.kcal}
},
"lchf": {
"calorie": {
"sense": 0,
"goal": self.kcal},
"protein": {
"sense": 0,
"goal": 0.40 * self.kcal},
"fat": {
"sense": 0,
"goal": 0.40 * self.kcal},
"carb": {
"sense": 0,
"goal": 0.20 * self.kcal}
}}
meal_goals = {
"M1": {
"calorie": 1 / 3 * self.kcal,
"protein": 1 / 3 * nutri_goals[self.diet]["protein"]["goal"],
"fat": 1 / 3 * nutri_goals[self.diet]["fat"]["goal"],
"carb": 1 / 3 * nutri_goals[self.diet]["carb"]["goal"]},
"M2": {
"calorie": 1 / 3 * self.kcal,
"protein": 1 / 3 * nutri_goals[self.diet]["protein"]["goal"],
"fat": 1 / 3 * nutri_goals[self.diet]["fat"]["goal"],
"carb": 1 / 3 * nutri_goals[self.diet]["carb"]["goal"]},
"M3": {
"calorie": 1 / 3 * self.kcal,
"protein": 1 / 3 * nutri_goals[self.diet]["protein"]["goal"],
"fat": 1 / 3 * nutri_goals[self.diet]["fat"]["goal"],
"carb": 1 / 3 * nutri_goals[self.diet]["carb"]["goal"]},
}
# create a dict with recipe_id - timedelta bindings
recipe_time_distance_dict = {}
for recipe_id in food_database['recipes'].keys():
if recipe_id in user_database[self.user]['recipes'].keys():
latest_date_recipe = max([datetime.datetime.strptime(elem, '%d.%m.%Y') for elem in user_database[self.user]['recipes'][recipe_id]])
delta = datetime.datetime.strptime(start_date,'%d.%m.%Y')-latest_date_recipe
recipe_time_distance_dict[recipe_id] = int(delta.days)
print(recipe_id, "last used on day:", latest_date_recipe, "distance:",int(delta.days))
else:
recipe_time_distance_dict[recipe_id] = 100
for key in recipe_time_distance_dict.keys():
print("recipe:",key, "\tDistance:",recipe_time_distance_dict[key])
# retrieve a list of ingredients that should be included into the optimization
opti_ingredient = [ingredient for ingredient in ingredient_dictionary if
ingredient_dictionary[ingredient]['optimization_include'] == True]
# retrieve a list of ingredients that are distinct
distinct_ingredient = [ingredient for ingredient in ingredient_dictionary if
ingredient_dictionary[ingredient]['distinct_ingredient'] == True]
# multiply each ingredient and it's nutrient by standard serving and calories
opti_ingredient_prop = defaultdict()
for prop, fac in zip(["price", "calorie", "protein", "fat", "carb"], [1, 1, 4.1, 9.1, 4.1]):
opti_ingredient_prop[prop] = dict()
for recipe in recipe_dictionary:
for ingredient in opti_ingredient:
if ingredient in recipe_dictionary[recipe]['ingredient']:
if prop == "price":
opti_ingredient_prop[prop][recipe, ingredient] = float(ingredient_dictionary[ingredient][prop]) * fac * \
float(ingredient_dictionary[ingredient][
"standard_qty"])
else:
opti_ingredient_prop[prop][recipe, ingredient] = float(ingredient_dictionary[ingredient][prop]) * fac * \
float(ingredient_dictionary[ingredient][
"standard_qty"])
# map recipes and ingredients to one another
recipe_ingredient_map = dict()
for ingredient in opti_ingredient:
for recipe in recipe_dictionary:
if ingredient in recipe_dictionary[recipe]['ingredient']:
recipe_ingredient_map[recipe, ingredient] = {}
# map number of protein sources and possible recipe-combinations:
recipe_combinations_dict = dict()
recipe_combinations = [i for i in itertools.combinations(recipe_dictionary.keys(), len(self.groups))]
for comb in recipe_combinations:
recipe_combinations_dict[comb] = len(
set([recipe_dictionary[comb[i]]["protein_sources"][j] for i in range(len(comb)) for j in
range(len(recipe_dictionary[comb[i]]["protein_sources"]))]))
print(recipe_combinations_dict)
print(recipe_time_distance_dict)
# map groups, recipes and ingredients
groups_recipes_ingredient = [(group, recipe_ingredient) for group in self.groups for recipe_ingredient in
recipe_ingredient_map]
# map groups and recipes
group_recipes = [(group, recipe) for group in self.groups for recipe in recipe_dictionary]
# set up the lp-Variables:
recipe_date_distance_lp = LpVariable.dict("Time_Distance", recipe_time_distance_dict, 0, cat = 'Continuous') # indicates the time-distance to last usage
recipe_combination_indicator_lp = LpVariable.dicts("Combo_Chosen", recipe_combinations_dict, 0, cat='Binary')
recipe_ingredient_amount_lp = LpVariable.dicts("Recipe_Ingredient_Amount", recipe_ingredient_map, 0,
cat='Continuous') # amount ingredient, linked to recipe
recipe_indicator_lp = LpVariable.dicts("Chosen", recipe_dictionary, 0,
cat="Binary") # indicates, if a recipe is chosen
group_recipe_amount_lp = LpVariable.dicts("Amount", group_recipes, 0,
cat='Integer') # amount of recipe in a group
group_recipe_indicator_lp = LpVariable.dicts("Chosen", group_recipes, 0,
cat='Binary') # indicates if a recipe is chosen in a group
group_recipe_ingredient_amount_lp = LpVariable.dicts("Amount_Day", groups_recipes_ingredient, 0,
cat='Integer') # group-recipe-ingredient-mapping, amount of which used in group
# initialize Problem
prob = LpProblem("Diet Problem", LpMinimize)
prob += lpSum([opti_ingredient_prop['price'][r, i] *
recipe_ingredient_amount_lp[r, i]
for r in recipe_dictionary
for i in opti_ingredient
if i in recipe_dictionary[r]['ingredient']]) \
- 1 * lpSum([recipe_combinations_dict[comb] * recipe_combination_indicator_lp[comb] for comb in
recipe_combinations_dict])\
- 1000 * lpSum([recipe_time_distance_dict[recipe_id] * recipe_indicator_lp[recipe_id] for recipe_id in
recipe_dictionary])
print([recipe_time_distance_dict[recipe_id] * recipe_date_distance_lp[recipe_id] for recipe_id in
recipe_dictionary],"\n",
[recipe_combinations_dict[comb] * recipe_combination_indicator_lp[comb] for comb in
recipe_combinations_dict]
)
#prob += lpSum([recipe_combination_indicator_lp[combo]*recipe_combinations_dict[combo] for combo in recipe_combinations_dict]) >= len(self.groups)
# set up the constraints for the ingredients, link with amount of portions:
for r in recipe_dictionary:
for i in opti_ingredient:
for group in self.groups:
if i in recipe_dictionary[r]['ingredient']:
prob += group_recipe_ingredient_amount_lp[group, (r, i)] >= group_recipe_amount_lp[group, r] * \
float(recipe_dictionary[r]['ingredient'][i]['constraints']['min'])
prob += group_recipe_ingredient_amount_lp[group, (r, i)] <= group_recipe_amount_lp[group, r] * \
float(recipe_dictionary[r]['ingredient'][i]['constraints']['max'])
# set up the constraints for nutrients per group:
group_constraints = defaultdict()
for group in self.groups:
group_constraints[group] = defaultdict()
for prop in ["calorie", "protein", "fat", "carb"]:
if prop == "calorie": # hard constraint for calories
prob += lpSum([opti_ingredient_prop[prop][r, i] * \
group_recipe_ingredient_amount_lp[group, (r, i)] for r in recipe_dictionary for i in
opti_ingredient if i in \
recipe_dictionary[r]['ingredient']]) >= sum(
[meal_goals[slot[1]][prop] for slot in group]) - 100
prob += lpSum([opti_ingredient_prop[prop][r, i] * \
group_recipe_ingredient_amount_lp[group, (r, i)] for r in recipe_dictionary for i in
opti_ingredient if i in \
recipe_dictionary[r]['ingredient']]) <= sum(
[meal_goals[slot[1]][prop] for slot in group]) + 100
else: # elastic constraint for the other nutrients
group_constraints[group][prop] = defaultdict()
group_constraints[group][prop]["rhs"] = sum([meal_goals[slot[1]][prop] for slot in group])
group_constraints[group][prop]["lhs"] = lpSum([opti_ingredient_prop[prop][r, i] *
group_recipe_ingredient_amount_lp[group, (r, i)]
for r in recipe_dictionary
for i in opti_ingredient
if i in recipe_dictionary[r]['ingredient']])
group_constraints[group][prop]["con"] = LpConstraint(group_constraints[group][prop]["lhs"],
sense=nutri_goals[self.diet][prop]['sense'],
name=str(group) + prop + "_con",
rhs=group_constraints[group][prop]["rhs"])
group_constraints[group][prop]["elastic"] = group_constraints[group][prop][
"con"].makeElasticSubProblem(penalty=1, proportionFreeBound=0.00001)
prob.extend(group_constraints[group][prop]["elastic"])
# recipes in exclude-list should be not be used
for r in self.exclude_list:
prob += lpSum([group_recipe_indicator_lp[g, r] for g in self.groups]) <= 0
# recipes should only be used in fiting slots (M1 -> M1, M3 -> M3, M2 -> M2)
for group in self.groups:
for slot in ["M1", "M2", "M3"]:
if slot in [meal_slot[1] for meal_slot in group]:
prob += lpSum([group_recipe_indicator_lp[group, r] for r in recipe_dictionary if
not recipe_dictionary[r][slot]]) == 0
# link group-recipe-indicator and recipe-amount
for r in recipe_dictionary:
for group in self.groups:
prob += group_recipe_amount_lp[group, r] >= group_recipe_indicator_lp[group, r] * 0.1
prob += group_recipe_amount_lp[group, r] <= group_recipe_indicator_lp[group, r] * 8
# link the recipe-combinations and the recipes:
for recipe in recipe_dictionary:
prob += pulp.lpSum([group_recipe_amount_lp[group, recipe] for group in self.groups]) >= recipe_indicator_lp[ \
recipe] * 0.1
prob += pulp.lpSum([group_recipe_amount_lp[group, recipe] for group in self.groups]) <= recipe_indicator_lp[ \
recipe] * 8
for recipe in recipe_dictionary:
for ingredient in opti_ingredient:
if ingredient in recipe_dictionary[recipe]["ingredient"]:
prob += lpSum([group_recipe_ingredient_amount_lp[group,(recipe,ingredient)] for group in self.groups]) == recipe_ingredient_amount_lp[recipe,ingredient]
# link the combo-indicator and the recipe:
for combo in recipe_combinations_dict:
prob += lpSum([recipe_indicator_lp[recipe] for recipe in combo]) / len(self.groups) >= \
recipe_combination_indicator_lp[combo]
# no idea why this has to be included, but it's important asf
prob += pulp.lpSum([recipe_combination_indicator_lp[combo] for combo in recipe_combinations]) == 1
# every group has to have one recipe
for g in self.groups:
prob += lpSum([group_recipe_indicator_lp[g, r] for r in recipe_dictionary]) == 1
# every recipe in one group at max
for r in recipe_dictionary:
prob += lpSum([group_recipe_indicator_lp[g, r] for g in self.groups]) <= 1
prob.solve(PULP_CBC_CMD(msg=True,maxSeconds=180))
if LpStatus[prob.status] == 'Optimal':
self.status = True
else:
self.status = False
obj = value(prob.objective)
recipe_amount_inv_map = {str(v): k for k, v in group_recipe_amount_lp.items()}
ingredients_inv_map = {str(v): k for k, v in recipe_ingredient_amount_lp.items()}
day_meal_inv_map = {str(v): k for k, v in group_recipe_ingredient_amount_lp.items()}
for prop in ["price", "calorie", "protein", "fat", "carb"]:
self.result_dict[prop] = 0
for v in prob.variables():
if v.value() > 0:
if ("M1" or "M2" or "M3") and "Amount_Day" in str(v):
group = day_meal_inv_map[v.name][0]
result = day_meal_inv_map[v.name][1]
recipe = self.food_database['recipes'][result[0]]['name']
ingredient = self.food_database['ingredients'][result[1]]['name']
if group not in self.menu_dict.keys():
self.menu_dict[group] = {}
if recipe not in self.menu_dict[group]:
self.menu_dict[group][recipe] = {}
self.menu_dict[group][recipe][ingredient] = v.value()
if "Amount_((" in str(v):
recipe_id = recipe_amount_inv_map[v.name][1]
self.portions_dict[recipe_id] = v.value()
if "Recipe_Ingredient_Amount" in str(v):
if self.food_database['ingredients'][ingredients_inv_map[v.name][1]]['name'] in self.grocery_dict.keys():
self.grocery_dict[self.food_database['ingredients'][ingredients_inv_map[v.name][1]]['name']] += v.value()
else:
self.grocery_dict[self.food_database['ingredients'][ingredients_inv_map[v.name][1]]['name']] = v.value()
for prop in ["price","calorie", "protein", "fat", "carb"]:
self.result_dict[prop] += round(opti_ingredient_prop[prop][ingredients_inv_map[v.name]] * (v.value()), 3)
self.recipe_list = []
for key in self.portions_dict.keys():
parsed_recipe = self.food_database['recipes'][key]['name']
self.recipe_list.append(parsed_recipe)
for ingredient in self.food_database['recipes'][key]['ingredient'].keys():
parsed_ingredient = self.food_database['ingredients'][ingredient]['name']
for | |
"""
In fit phase, only select motifs.
table: start index, weight, column it applies to, and count of rows that follow motif
slice into possible motifs
compare motifs (efficiently)
choose the motifs to use for each series
if not shared, can drop column part of index ref
combine the following values into forecasts
consider the weights
magnitude and percentage change
account for forecasts not running the full length of forecast_length
if longer than comparative, append na df then ffill
Profile speed and which code to improve first
Remove for loops
Quantile not be calculated until after pos_forecasts narrowed down to only forecast length
https://krstn.eu/np.nanpercentile()-there-has-to-be-a-faster-way/
"""
self.fit_runtime = datetime.datetime.now() - self.startTime
return self
def predict(
self, forecast_length: int, future_regressor=None, just_point_forecast=False
):
"""Generates forecast data immediately following dates of index supplied to .fit()
Args:
forecast_length (int): Number of periods of data to forecast ahead
regressor (numpy.Array): additional regressor, not used
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
predictStartTime = datetime.datetime.now()
forecasts = self.forecasts.head(forecast_length)
if forecasts.shape[0] < forecast_length:
extra_len = forecast_length - forecasts.shape[0]
empty_frame = pd.DataFrame(
index=np.arange(extra_len), columns=forecasts.columns
)
forecasts = pd.concat([forecasts, empty_frame], axis=0, sort=False).fillna(
method='ffill'
)
forecasts.columns = self.column_names
forecasts.index = self.create_forecast_index(forecast_length=forecast_length)
if just_point_forecast:
return forecasts
else:
lower_forecasts = self.lower_forecasts.head(forecast_length)
upper_forecasts = self.upper_forecasts.head(forecast_length)
if lower_forecasts.shape[0] < forecast_length:
extra_len = forecast_length - lower_forecasts.shape[0]
empty_frame = pd.DataFrame(
index=np.arange(extra_len), columns=lower_forecasts.columns
)
lower_forecasts = pd.concat(
[lower_forecasts, empty_frame], axis=0, sort=False
).fillna(method='ffill')
lower_forecasts.columns = self.column_names
lower_forecasts.index = self.create_forecast_index(
forecast_length=forecast_length
)
if upper_forecasts.shape[0] < forecast_length:
extra_len = forecast_length - upper_forecasts.shape[0]
empty_frame = pd.DataFrame(
index=np.arange(extra_len), columns=upper_forecasts.columns
)
upper_forecasts = pd.concat(
[upper_forecasts, empty_frame], axis=0, sort=False
).fillna(method='ffill')
upper_forecasts.columns = self.column_names
upper_forecasts.index = self.create_forecast_index(
forecast_length=forecast_length
)
predict_runtime = datetime.datetime.now() - predictStartTime
prediction = PredictionObject(
model_name=self.name,
forecast_length=forecast_length,
forecast_index=forecasts.index,
forecast_columns=forecasts.columns,
lower_forecast=lower_forecasts,
forecast=forecasts,
upper_forecast=upper_forecasts,
prediction_interval=self.prediction_interval,
predict_runtime=predict_runtime,
fit_runtime=self.fit_runtime,
model_parameters=self.get_params(),
)
return prediction
def get_new_params(self, method: str = 'random'):
"""Return dict of new parameters for parameter tuning."""
comparison_choice = np.random.choice(
a=[
'pct_change',
'pct_change_sign',
'magnitude_pct_change_sign',
'magnitude',
'magnitude_pct_change',
],
size=1,
p=[0.2, 0.1, 0.4, 0.2, 0.1],
).item()
phrase_len_choice = np.random.choice(
a=[5, 10, 15, 20, 30, 90, 360],
p=[0.2, 0.2, 0.1, 0.25, 0.1, 0.1, 0.05],
size=1,
).item()
shared_choice = np.random.choice(a=[True, False], size=1, p=[0.05, 0.95]).item()
distance_metric_choice = np.random.choice(
a=[
'other',
'hamming',
'cityblock',
'cosine',
'euclidean',
'l1',
'l2',
'manhattan',
],
size=1,
p=[0.44, 0.05, 0.1, 0.1, 0.1, 0.2, 0.0, 0.01],
).item()
if distance_metric_choice == 'other':
distance_metric_choice = np.random.choice(
a=[
'braycurtis',
'canberra',
'chebyshev',
'correlation',
'dice',
'hamming',
'jaccard',
'kulsinski',
'mahalanobis',
'minkowski',
'rogerstanimoto',
'russellrao',
# 'seuclidean',
'sokalmichener',
'sokalsneath',
'sqeuclidean',
'yule',
],
size=1,
).item()
max_motifs_choice = float(
np.random.choice(
a=[20, 50, 100, 200, 0.05, 0.2, 0.5],
size=1,
p=[0.4, 0.1, 0.2, 0.09, 0.1, 0.1, 0.01],
).item()
)
recency_weighting_choice = np.random.choice(
a=[0, 0.5, 0.1, 0.01, -0.01, 0.001],
size=1,
p=[0.5, 0.02, 0.05, 0.35, 0.05, 0.03],
).item()
# cutoff_threshold_choice = np.random.choice(
# a=[0.7, 0.9, 0.99, 1.5], size=1, p=[0.1, 0.1, 0.4, 0.4]
# ).item()
cutoff_minimum_choice = np.random.choice(
a=[5, 10, 20, 50, 100, 200, 500], size=1, p=[0, 0, 0.2, 0.2, 0.4, 0.1, 0.1]
).item()
point_method_choice = np.random.choice(
a=['median', 'mean', 'sign_biased_mean'],
size=1,
p=[0.59, 0.3, 0.1],
).item()
return {
'phrase_len': phrase_len_choice,
'comparison': comparison_choice,
'shared': shared_choice,
'distance_metric': distance_metric_choice,
'max_motifs': max_motifs_choice,
'recency_weighting': recency_weighting_choice,
'cutoff_minimum': cutoff_minimum_choice,
'point_method': point_method_choice,
}
def get_params(self):
"""Return dict of current parameters."""
return {
'phrase_len': self.phrase_len,
'comparison': self.comparison,
'shared': self.shared,
'distance_metric': self.distance_metric,
'max_motifs': self.max_motifs,
'recency_weighting': self.recency_weighting,
# 'cutoff_threshold': self.cutoff_threshold,
'cutoff_minimum': self.cutoff_minimum,
'point_method': self.point_method,
}
def looped_motif(
Xa,
Xb,
name,
r_arr=None,
window=10,
distance_metric="minkowski",
k=10,
point_method="mean",
prediction_interval=0.9,
):
"""inner function for Motif model."""
if r_arr is None:
y = Xa[:, window:]
Xa = Xa[:, :window]
else:
y = Xa[r_arr, window:]
Xa = Xa[r_arr, :window]
# model = NearestNeighbors(n_neighbors=10, algorithm='auto', metric='minkowski', n_jobs=1)
# model.fit(Xa)
# model.kneighbors(Xb)
A = cdist(Xa, Xb, metric=distance_metric)
# lowest values
idx = np.argpartition(A, k, axis=0)[:k].flatten()
# distances for weighted mean
results = y[idx]
if point_method == "weighted_mean":
weights = A[idx].flatten()
if weights.sum() == 0:
weights = None
forecast = np.average(results, axis=0, weights=weights)
elif point_method == "mean":
forecast = np.nanmean(results, axis=0)
elif point_method == "median":
forecast = np.nanmedian(results, axis=0)
elif point_method == "midhinge":
q1 = np.nanquantile(results, q=0.25, axis=0)
q2 = np.nanquantile(results, q=0.75, axis=0)
forecast = (q1 + q2) / 2
pred_int = (1 - prediction_interval) / 2
upper_forecast = np.nanquantile(results, q=(1 - pred_int), axis=0)
lower_forecast = np.nanquantile(results, q=pred_int, axis=0)
forecast = pd.Series(forecast)
forecast.name = name
upper_forecast = pd.Series(upper_forecast)
upper_forecast.name = name
lower_forecast = pd.Series(lower_forecast)
lower_forecast.name = name
return (forecast, upper_forecast, lower_forecast)
class Motif(ModelObject):
"""Forecasts using a nearest neighbors type model adapted for probabilistic time series.
Args:
name (str): String to identify class
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
"""
def __init__(
self,
name: str = "Motif",
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday_country: str = 'US',
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = 1,
window: int = 5,
point_method: str = "weighted_mean",
distance_metric: str = "minkowski",
k: int = 10,
max_windows: int = 5000,
multivariate: bool = False,
**kwargs
):
ModelObject.__init__(
self,
"MultivariateMotif" if multivariate else "UnivariateMotif",
frequency,
prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
self.window = window
self.point_method = point_method
self.distance_metric = distance_metric
self.k = k
self.max_windows = max_windows
self.multivariate = multivariate
def fit(self, df, future_regressor=None):
"""Train algorithm given data supplied.
Args:
df (pandas.DataFrame): Datetime Indexed
"""
df = self.basic_profile(df)
self.df = df
self.fit_runtime = datetime.datetime.now() - self.startTime
return self
def predict(
self, forecast_length: int, future_regressor=None, just_point_forecast=False
):
"""Generates forecast data immediately following dates of index supplied to .fit()
Args:
forecast_length (int): Number of periods of data to forecast ahead
regressor (numpy.Array): additional regressor, not used
just_point_forecast (bool): If True, return a pandas.DataFrame of just point forecasts
Returns:
Either a PredictionObject of forecasts and metadata, or
if just_point_forecast == True, a dataframe of point forecasts
"""
predictStartTime = datetime.datetime.now()
# keep this at top so it breaks quickly if missing version
x = np.lib.stride_tricks.sliding_window_view(
self.df.to_numpy(), self.window + forecast_length, axis=0
)
test_index = self.create_forecast_index(forecast_length=forecast_length)
# subsample windows if needed
r_arr = None
if self.max_windows is not None:
if self.multivariate:
X_size = x.shape[0] * x.shape[1]
else:
X_size = x.shape[0]
if self.max_windows < X_size:
r_arr = np.random.default_rng(self.random_seed).integers(
0, X_size, size=self.max_windows
)
self.parallel = True
if self.n_jobs in [0, 1] or self.df.shape[1] < 5:
self.parallel = False
else:
try:
from joblib import Parallel, delayed
except Exception:
self.parallel = False
# joblib multiprocessing to loop through series
if self.parallel:
df_list = Parallel(n_jobs=(self.n_jobs - 1))(
delayed(looped_motif)(
Xa=x.reshape(-1, x.shape[-1]) if self.multivariate else x[:, i],
Xb=self.df.iloc[-self.window :, i].to_numpy().reshape(1, -1),
name=self.df.columns[i],
r_arr=r_arr,
window=self.window,
distance_metric=self.distance_metric,
k=self.k,
point_method=self.point_method,
prediction_interval=self.prediction_interval,
)
for i in range(self.df.shape[1])
)
else:
df_list = []
for i in range(self.df.shape[1]):
df_list.append(
looped_motif(
Xa=x.reshape(-1, x.shape[-1]) if self.multivariate else x[:, i],
Xb=self.df.iloc[-self.window :, i].to_numpy().reshape(1, -1),
name=self.df.columns[i],
r_arr=r_arr,
window=self.window,
distance_metric=self.distance_metric,
k=self.k,
point_method=self.point_method,
prediction_interval=self.prediction_interval,
)
)
complete = list(map(list, zip(*df_list)))
forecast = pd.concat(complete[0], axis=1)
forecast.index = test_index
lower_forecast = pd.concat(complete[1], axis=1)
lower_forecast.index = test_index
upper_forecast = pd.concat(complete[2], axis=1)
upper_forecast.index = test_index
if just_point_forecast:
return forecast
else:
predict_runtime = datetime.datetime.now() - predictStartTime
prediction = PredictionObject(
model_name=self.name,
forecast_length=forecast_length,
forecast_index=forecast.index,
forecast_columns=forecast.columns,
lower_forecast=lower_forecast,
forecast=forecast,
upper_forecast=upper_forecast,
prediction_interval=self.prediction_interval,
predict_runtime=predict_runtime,
fit_runtime=self.fit_runtime,
model_parameters=self.get_params(),
)
return prediction
def get_new_params(self, method: str = 'random'):
"""Returns dict of new parameters for parameter tuning"""
metric_list = [
'braycurtis',
'canberra',
'chebyshev',
'cityblock',
'correlation',
'cosine',
'dice',
'euclidean',
'hamming',
'jaccard',
'jensenshannon',
'kulsinski',
'mahalanobis',
'matching',
'minkowski',
'rogerstanimoto',
'russellrao',
# 'seuclidean',
'sokalmichener',
'sokalsneath',
'sqeuclidean',
'yule',
]
return {
"window": random.choices([5, 7, 10, 15, 30], [0.2, 0.1, 0.5, 0.1, 0.1])[0],
"point_method": random.choices(
["weighted_mean", "mean", "median", "midhinge"], [0.4, 0.2, 0.2, 0.2]
)[0],
"distance_metric": random.choice(metric_list),
"k": random.choices([5, 10, 15, 20, 100], [0.2, 0.5, 0.1, 0.1, 0.1])[0],
"max_windows": random.choices([None, 1000, 10000], [0.01, 0.1, 0.8])[0],
}
def get_params(self):
"""Return dict of current parameters"""
return {
"window": self.window,
"point_method": self.point_method,
"distance_metric": self.distance_metric,
"k": self.k,
"max_windows": self.max_windows,
}
def predict_reservoir(
df,
forecast_length,
prediction_interval=None,
warmup_pts=1,
k=2,
ridge_param=2.5e-6,
seed_pts: int = 1,
seed_weighted: str = None,
):
"""Nonlinear Variable Autoregression or 'Next-Generation Reservoir Computing'
based on https://github.com/quantinfo/ng-rc-paper-code/
<NAME>., <NAME>., <NAME>. | |
tok in break_tokens:
if tok in tokenizer.encode(tokenizer._eos_token):
continue
new_tokens.append(tok)
# ipdb.set_trace()
response = tokenizer.decode(new_tokens).strip(' ,.')
return response
def convert_belief(belief):
    """Convert a flat list of belief-state strings into a nested dict.

    Each entry looks like ``"<domain> <slot> <value...>"`` or, for booking
    slots, ``"<domain> book <slot> <value...>"`` (the slot then spans two
    tokens, e.g. ``"book day"``).

    Args:
        belief: list of belief-state strings; blank entries are skipped.

    Returns:
        dict mapping domain -> {slot: value}.
    """
    dic = {}
    for bs in belief:
        if bs in [' ', '']:
            continue
        parts = bs.split(' ')  # split once instead of re-splitting per field
        domain = parts[0]
        if parts[1] == 'book':
            # Booking slots are two tokens wide ("book day", "book stay", ...).
            slot = ' '.join(parts[1:3])
            value = ' '.join(parts[3:])
        else:
            slot = parts[1]
            value = ' '.join(parts[2:])
        dic.setdefault(domain, {})[slot] = value
    return dic
def get_db_text(belief_domain, dom, only_match=False):
    """Build the DB-search text snippet for one domain.

    Args:
        belief_domain: dict mapping domain -> belief-state slots.
        dom: domain to query against the MultiWOZ DB.
        only_match: if True, omit the booking-availability part.

    Returns:
        list with a single "<dom> match<bucket> [booking=...]" entry, or the
        empty string for domains with no backing DB (taxi, police, ...).
    """
    db_text_tmp = []
    if dom not in ['restaurant', 'hotel', 'attraction', 'train']:
        # No database for this domain: bail out before querying. (The
        # original fell through and would crash on ''.append(...).)
        return ''
    domain_match = len(multiwoz_db.queryResultVenues_new(dom, belief_domain[dom], real_belief=True))
    if dom != 'train':
        if domain_match >= 5:
            domain_match_text = '>=5'
        else:
            domain_match_text = '={}'.format(domain_match)
    else:
        # NOTE(review): this assumes train match counts are pre-bucketed to
        # {0, 2, 5, 10, 40, >40} upstream -- verify queryResultVenues_new.
        if domain_match == 0:
            domain_match_text = '=0'
        elif domain_match == 2:
            domain_match_text = '<3'
        elif domain_match == 5:
            domain_match_text = '<6'
        elif domain_match == 10:
            domain_match_text = '<11'
        elif domain_match == 40:
            domain_match_text = '<41'
        else:
            domain_match_text = '>40'
    # Booking is considered available whenever at least one venue matches.
    domain_book_text = 'not available' if domain_match == 0 else 'available'
    if only_match:
        db_text_tmp.append('{} match{}'.format(dom, domain_match_text))
    else:
        db_text_tmp.append('{} match{} booking={}'.format(dom, domain_match_text, domain_book_text))
    return db_text_tmp
def lexicalize_train(delex_response, db_results, turn_beliefs, turn_domain):
    """Fill train placeholders in a delexicalized response.

    Args:
        delex_response: response text containing placeholders such as
            [train_id], [value_place], [value_time] or [value_count].
        db_results: list of matching train DB records; one is sampled at
            random to supply slot values.
        turn_beliefs: dict mapping domain -> belief-state slots, used as a
            fallback source of values when there are no DB results.
        turn_domain: active domain for this turn (expected 'train').

    Returns:
        The lexicalized response string.

    NOTE(review): placeholder checks test the original ``delex_response``
    while replacements accumulate in ``lex_response``; multi-token patterns
    are handled first, so a later, shorter pattern check can be a no-op if
    an earlier replacement already consumed the placeholder.
    """
    if len(db_results) > 0:
        sample = random.sample(db_results, k=1)[0]
        value_count = len(db_results)
    else:
        # No DB match: fall back to the belief state for slot values.
        # domain = list(beliefs.keys())[0]
        sample = turn_beliefs[turn_domain]
        value_count = 0
    # print(sample)
    lex_response = delex_response
    if 'from [value_place] to [value_place]' in delex_response:
        departure = sample['departure']
        destination = sample['destination']
        lex_response = lex_response.replace('from [value_place] to [value_place]', 'from {} to {}'.format(departure, destination))
    if 'from [value_place] on [value_day]' in delex_response:
        departure = sample['departure']
        day = sample['day']
        lex_response = lex_response.replace('from [value_place] on [value_day]', 'from {} on {}'.format(departure, day))
    if 'from [value_place]' in delex_response:
        departure = sample['departure']
        # destination = sample['destination']
        lex_response = lex_response.replace('from [value_place]', 'from {}'.format(departure))
    if 'leaving [value_place] at [value_day]' in delex_response:
        departure = sample['departure']
        day = sample['day']
        lex_response = lex_response.replace('leaving [value_place] at [value_day]', 'leaving {} at {}'.format(departure, day))
    if 'leaving [value_place] at [value_time]' in delex_response:
        leaveat = sample['leaveAt']
        departure = sample['departure']
        lex_response = lex_response.replace('leaving [value_place] at [value_time]', 'leaving {} at {}'.format(departure, leaveat))
    if 'leaves [value_place] at [value_time]' in delex_response:
        leaveat = sample['leaveAt']
        departure = sample['departure']
        lex_response = lex_response.replace('leaves [value_place] at [value_time]', 'leaves {} at {}'.format(departure, leaveat))
    if 'leaves at [value_time]' in delex_response:
        # Guarded: belief-state fallback samples may lack 'leaveAt'.
        if 'leaveAt' in sample:
            leaveat = sample['leaveAt']
            lex_response = lex_response.replace('leaves at [value_time]', 'leaves at {}'.format(leaveat))
    if 'other at [value_time]' in delex_response:
        leaveat = sample['leaveAt']
        lex_response = lex_response.replace('other at [value_time]', 'other at {}'.format(leaveat))
    if 'arrives in [value_place] at [value_time]' in delex_response:
        arriveby = sample['arriveBy']
        destination = sample['destination']
        lex_response = lex_response.replace('arrives in [value_place] at [value_time]', 'arrives in {} at {}'.format(destination, arriveby))
    if 'arrives at [value_time]' in delex_response:
        arriveby = sample['arriveBy']
        lex_response = lex_response.replace('arrives at [value_time]', 'arrives at {}'.format(arriveby))
    if '[value_count] of these' in delex_response:
        # NOTE(review): rebinding value_count to 'one' here means a later
        # bare [value_count] is replaced with 'one' rather than the match
        # count -- confirm this is intended.
        value_count = 'one'
        lex_response = lex_response.replace('[value_count] of these', value_count)
    if '[value_count] minutes' in delex_response:
        lex_response = lex_response.replace('[value_count] minutes', sample['duration'])
    if '[value_count]' in delex_response:
        value_count = str(value_count)
        lex_response = lex_response.replace('[value_count]', value_count)
    if 'leaving [value_place]' in delex_response:
        departure = sample['departure']
        lex_response = lex_response.replace('leaving [value_place]', 'leaving {}'.format(departure))
    if 'leaves [value_place]' in delex_response:
        departure = sample['departure']
        lex_response = lex_response.replace('leaves [value_place]', 'leaves {}'.format(departure))
    if 'arrives in [value_place]' in delex_response:
        destination = sample['destination']
        lex_response = lex_response.replace('arrives in [value_place]', 'arrives in {}'.format(destination))
    if '[train_id]' in delex_response:
        train_id = sample['trainID']
        lex_response = lex_response.replace('[train_id]', train_id)
    if '[value_day]' in delex_response:
        train_day = sample['day']
        lex_response = lex_response.replace('[value_day]', train_day)
    if '[value_price]' in delex_response:
        train_price = sample['price']
        lex_response = lex_response.replace('[value_price]', train_price)
    if '[train_reference]' in delex_response:
        # Demo-only: fabricate a plausible booking reference number.
        random_number = random.randint(10000,99999)
        lex_response = lex_response.replace('[train_reference]', str(random_number))
    return lex_response
def lexicalize_hotel(delex_response, db_results, turn_beliefs, turn_domain):
    """Fill hotel placeholders in a delexicalized response.

    Args:
        delex_response: response text containing placeholders such as
            [hotel_name], [value_area] or [value_count].
        db_results: list of matching hotel DB records; one is sampled at
            random to supply slot values.
        turn_beliefs: dict mapping domain -> belief-state slots; the source
            of booking slots and the fallback when there are no DB results.
        turn_domain: active domain for this turn (expected 'hotel').

    Returns:
        The lexicalized response string.
    """
    if len(db_results) > 0:
        sample = random.sample(db_results, k=1)[0]
        value_count = len(db_results)
    else:
        # No DB match: fall back to the belief state for slot values.
        sample = turn_beliefs[turn_domain]
        value_count = 0
    # Bug fix: the original read the undefined/mis-shaped module global
    # `beliefs['book day']`; booking slots live under the turn's domain.
    booking = turn_beliefs.get(turn_domain, {})
    lex_response = delex_response
    try:
        if '[hotel_name]' in delex_response:
            lex_response = lex_response.replace('[hotel_name]', sample['name'])
        if '[hotel_address]' in delex_response:
            lex_response = lex_response.replace('[hotel_address]', sample['address'])
        if '[value_area]' in delex_response:
            lex_response = lex_response.replace('[value_area]', sample['area'])
        if 'starting [value_day]' in delex_response:
            lex_response = lex_response.replace('starting [value_day]', 'starting {}'.format(booking['book day']))
        if '[value_pricerange]' in delex_response:
            lex_response = lex_response.replace('[value_pricerange]', sample['pricerange'])
        # Multi-token [value_count] patterns must run before the generic
        # [value_count] replace, otherwise they become dead code (the
        # original handled 'people'/'nights' after the generic replace).
        if '[value_count] star' in delex_response:
            lex_response = lex_response.replace('[value_count] star', '{} star'.format(sample['stars']))
        if '[value_count] people' in delex_response:
            lex_response = lex_response.replace('[value_count] people', '{} people'.format(booking['book people']))
        if '[value_count] nights' in delex_response:
            lex_response = lex_response.replace('[value_count] nights', '{} nights'.format(booking['book stay']))
        if '[value_count]' in delex_response:
            lex_response = lex_response.replace('[value_count]', str(value_count))
        if '[hotel_reference]' in delex_response:
            # Demo-only: fabricate a plausible booking reference number.
            random_number = random.randint(10000, 99999)
            lex_response = lex_response.replace('[hotel_reference]', str(random_number))
    except KeyError:
        # A slot is missing from both the DB sample and the belief state;
        # drop into the debugger as the original demo did.
        ipdb.set_trace()
    return lex_response
def get_turn_domain_old(b, a):
    """(Legacy) Determine which domain changed between two belief states.

    Args:
        b: current belief state, domain -> {slot: value}.
        a: previous belief state, domain -> {slot: value}.

    Returns:
        The domain that gained slots (or appeared) this turn, or the first
        domain when the two belief states are equal; None if no change was
        detected.

    NOTE(review): appears superseded by get_turn_domain(); drops into the
    debugger when no changed domain is found.
    """
    tmp = {}
    turn_domain = None
    if a == b:
        turn_domain = list(a.keys())[0]
    # elif len(b.keys()) > len(a.keys()):
    #     turn_domain = list(set(b) - set(a))[0]
    else:
        for domain in b.keys():
            if domain not in a:
                # An entirely new domain appeared this turn.
                turn_domain = domain
                tmp = b
                break
            # Slots present in b[domain] but absent from a[domain].
            tmp = {k: b[domain][k] for k in set(b[domain]) - set(a[domain])}
            if tmp != {}:
                turn_domain = domain
                break
    if not turn_domain:
        # No change detected: stop in the debugger for manual inspection.
        ipdb.set_trace()
        print('domain change')
        print('chane', tmp)
        print(b)
        print(a)
    # domain = list(tmp.keys())
    # if len(domain) > 1:
    #     raise TypeError()
    # elif len(domain) == 0:
    #     domain = list(a.keys())[0]
    # else:
    #     domain = domain[0]
    return turn_domain
def get_turn_domain(beliefs, q):
    """Return the active domain for this turn.

    The first belief-state domain not yet present in ``q`` is appended to
    ``q`` and returned; if every domain has been seen already, the most
    recently seen domain (``q[-1]``) is returned.
    """
    unseen = [domain for domain in beliefs.keys() if domain not in q]
    if unseen:
        active = unseen[0]
        q.append(active)
        return active
    return q[-1]
# Pretty-printer for debugging output.
pp = pprint.PrettyPrinter(indent=4)
# Belief state carried over from the previous turn -- presumably updated in
# the interactive loop below; verify against the chat loop.
prev_beliefs = {}
# Ordered list of domains seen so far; get_turn_domain() appends to it.
domain_queue = []
if __name__ == '__main__':
print('\33]0;SimpleTOD\a', end='')
sys.stdout.flush()
model_checkpoint = sys.argv[1]
decoding = sys.argv[2]
if decoding == 'nucleus':
TOP_P = float(sys.argv[3])
delay = 0.5
multiwoz_db = MultiWozDB()
print('\nLoading Model', end="")
if 'openai' in model_checkpoint:
tokenizer = OpenAIGPTTokenizer.from_pretrained(model_checkpoint)
model = OpenAIGPTLMHeadModel.from_pretrained(model_checkpoint)
else:
tokenizer = GPT2Tokenizer.from_pretrained(model_checkpoint)
model = GPT2LMHeadModel.from_pretrained(model_checkpoint)
# model.load_state_dict(torch.load(model_checkpoint))
model.eval()
model.to('cuda')
break_tokens = tokenizer.encode(tokenizer._eos_token) + tokenizer.encode('?') + tokenizer.encode('!')
# break_tokens = tokenizer.encode(tokenizer._eos_token)
MAX_LEN = model.config.n_ctx
if 'openai-gpt' in model_checkpoint:
tokenizer.add_special_tokens({'bos_token': '<|endoftext|>'})
tokenizer.add_special_tokens({'eos_token': '<|endoftext|>'})
sample = 1
print()
print(Fore.MAGENTA + '\nSimpleTOD is ready to chat. What would you like to ask?' + Style.RESET_ALL)
# history = []
context = ''
input_text = ''
turn = 0
# dbmatch = 0
while True:
print(Fore.GREEN)
raw_text = input('You: ')
print(Style.RESET_ALL)
input_text = raw_text.replace('you> ', '')
if input_text in ['q', 'quit']:
break
user = '<|user|> {}'.format(input_text)
context = context + ' ' + user
text = '<|endoftext|> <|context|> {} <|endofcontext|>'.format(context)
# print(context)
text = text.strip()
indexed_tokens = tokenizer.encode(text)
if len(indexed_tokens) > MAX_LEN:
indexed_tokens = indexed_tokens[-1*MAX_LEN:]
# Convert indexed tokens in a PyTorch tensor
tokens_tensor = torch.tensor([indexed_tokens])
# If you have a GPU, put everything on cuda
tokens_tensor = tokens_tensor.to('cuda')
predicted_index = indexed_tokens[-1]
# if decoding == 'nucleus':
# sample_output = model.generate(
# tokens_tensor,
# do_sample=True,
# max_length=MAX_LEN,
# top_p=TOP_P,
# top_k=0
# )
# elif decoding == 'greedy':
# sample_output = model.generate(
# tokens_tensor,
# max_length=MAX_LEN,
# do_sample=False
# )
# predicted_text = tokenizer.decode(sample_output[0], skip_special_tokens=True)
with torch.no_grad():
while predicted_index not in break_tokens:
outputs = model(tokens_tensor)
predictions = outputs[0]
predicted_index = torch.argmax(predictions[0, -1, :]).item()
indexed_tokens += [predicted_index]
tokens_tensor = torch.tensor([indexed_tokens]).to('cuda')
if len(indexed_tokens) > MAX_LEN:
break
if tokenizer.decode(indexed_tokens).endswith('<|endofbelief|>'):
break
tmp_pred = tokenizer.decode(indexed_tokens)
belief_text = get_belief_new_dbsearch(tmp_pred)
# print(tmp_pred)
beliefs = convert_belief(belief_text)
# domain = list(beliefs.keys())[0]
domain = get_turn_domain(beliefs, domain_queue)
if 'db' in model_checkpoint:
if 'dbnmatch' in model_checkpoint:
only_match = True
db_text_tmp = get_db_text(beliefs, dom=domain, only_match=only_match)
else:
db_text_tmp = get_db_text(beliefs, dom=domain)
db_text = ' <|dbsearch|> {} <|endofdbsearch|>'.format(' , '.join(db_text_tmp))
text = tmp_pred + db_text
# print(text)
# continue generation after creating db
indexed_tokens = tokenizer.encode(text)
if len(indexed_tokens) > MAX_LEN:
indexed_tokens = indexed_tokens[-1 * MAX_LEN:]
# Convert indexed tokens in a PyTorch tensor
tokens_tensor = torch.tensor([indexed_tokens])
# If you have a GPU, put everything on cuda
tokens_tensor = tokens_tensor.to('cuda')
predicted_index = indexed_tokens[-1]
truncate_action = False
# Predict all tokens
with torch.no_grad():
while predicted_index not in break_tokens:
outputs = model(tokens_tensor)
predictions = outputs[0]
predicted_index = torch.argmax(predictions[0, -1, :]).item()
indexed_tokens += [predicted_index]
if len(indexed_tokens) > MAX_LEN:
break
predicted_text = tokenizer.decode(indexed_tokens)
if '<|action|>' in predicted_text:
generated_actions = predicted_text.split('<|action|>')[-1].split('<|endofaction|>')[0].split(',')
| |
SLIDE MICROSCOPY IOD': ['Series'],
'12-LEAD ECG IOD': ['Series'],
'MR IMAGE IOD': ['Series'],
'ENHANCED MR COLOR IMAGE IOD': ['Series'],
'ENHANCED CT IMAGE IOD': ['Series'],
'XRF IMAGE IOD': ['Series'],
'RESPIRATORY WAVEFORM IOD': ['Series'],
'GENERAL AUDIO WAVEFORM IOD': ['Series'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Series'],
'AUTOREFRACTION MEASUREMENTS IOD': ['Series'],
'SURFACE SEGMENTATION IOD': ['Series'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Series'],
'VL MICROSCOPIC IMAGE IOD': ['Series'],
'SPATIAL REGISTRATION IOD': ['Series'],
'ENHANCED PET IMAGE IOD': ['Series'],
'ENHANCED X-RAY RF IMAGE IOD': ['Series'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Series'],
'INTRAOCULAR LENS CALCULATIONS IOD': ['Series'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Series'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Series'],
'VL ENDOSCOPIC IMAGE IOD': ['Series'],
'KERATOMETRY MEASUREMENTS IOD': ['Series'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Series'],
'CR IMAGE IOD': ['Series'],
},
# AcquisitionTime
0x00080032L: {
'SC IMAGE IOD': ['Image'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
None: ['Image', 'Dose', 'Segmentation'],
'SEGMENTATION IOD': ['Segmentation'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'MR IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'CT IMAGE IOD': ['Image'],
'NM IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
'US MULTI-FRAME IMAGE IOD': ['Image'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'RT DOSE IOD': ['Dose'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Image'],
'PET IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'US IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Image'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Image'],
'VL MICROSCOPIC IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Image'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Image'],
'RT IMAGE IOD': ['Image'],
'VL ENDOSCOPIC IMAGE IOD': ['Image'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Image'],
},
# CTAdditionalXRaySourceSequence
0x00189360L: {
'CT IMAGE IOD': ['Image'],
None: ['Image'],
},
# InputInformationSequence
0x00404021L: {
'UNIFIED PROCEDURE STEP IOD': ['Unified Procedure Step'],
'GENERAL PURPOSE SCHEDULED PROCEDURE STEP IOD': ['General Purpose Scheduled Procedure Step'],
None: ['Unified Procedure Step', 'General Purpose Scheduled Procedure Step'],
},
# TreatmentTime
0x30080251L: {
'RT BEAMS TREATMENT RECORD IOD': ['Treatment Record'],
'RT BRACHY TREATMENT RECORD IOD': ['Treatment Record'],
'RT ION BEAMS TREATMENT RECORD IOD': ['Treatment Record'],
'RT TREATMENT SUMMARY RECORD IOD': ['Treatment Record'],
None: ['Treatment Record'],
},
# RetrieveAETitle
0x00080054L: {
'STORAGE COMMITMENT IOD': ['Storage Commitment'],
None: ['Storage Commitment'],
},
# LeftLensSequence
0x00460015L: {
'LENSOMETRY MEASUREMENTS IOD': ['Equipment'],
None: ['Equipment'],
},
# ComponentAssemblySequence
0x00760060L: {
'IMPLANT ASSEMBLY TEMPLATE IOD': ['Implant Assembly'],
None: ['Implant Assembly'],
},
# SourceSequence
0x300A0210L: {
'RT PLAN IOD': ['Plan'],
None: ['Plan'],
},
# ReviewDate
0x300E0004L: {
'RT STRUCTURE SET IOD': ['Structure Set'],
'RT ION PLAN IOD': ['Plan'],
'RT PLAN IOD': ['Plan'],
'RT IMAGE IOD': ['Image'],
None: ['Structure Set', 'Plan', 'Image'],
},
# ReceiveCoilName
0x00181250L: {
'MR IMAGE IOD': ['Image'],
None: ['Image'],
},
# DetectorInformationSequence
0x00540022L: {
'NM IMAGE IOD': ['Image'],
None: ['Image'],
},
# PreserveCompositeInstancesAfterMediaCreation
0x2200000AL: {
'MEDIA CREATION MANAGEMENT IOD': ['Media Creation Management'],
None: ['Media Creation Management'],
},
# HangingProtocolUserGroupName
0x00720010L: {
'HANGING PROTOCOL IOD': ['Hanging Protocol'],
None: ['Hanging Protocol'],
},
# ShutterShape
0x00181600L: {
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'XA/XRF GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
None: ['Image', 'Presentation State'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
},
# NumberOfScreens
0x00720100L: {
'HANGING PROTOCOL IOD': ['Hanging Protocol'],
'BASIC STRUCTURED DISPLAY IOD': ['Presentation State'],
None: ['Hanging Protocol', 'Presentation State'],
},
# ShutterLeftVerticalEdge
0x00181602L: {
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
None: ['Image', 'Presentation State'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
},
# ScreeningTestModeCodeSequence
0x00240016L: {
'OPHTHALMIC VISUAL FIELD STATIC PERIMETRY MEASUREMENTS IOD': ['Measurements'],
None: ['Measurements'],
},
# ShutterRightVerticalEdge
0x00181604L: {
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
None: ['Image', 'Presentation State'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
},
# ShutterUpperHorizontalEdge
0x00181606L: {
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
None: ['Image', 'Presentation State'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
},
# RedPaletteColorLookupTableDescriptor
0x00281101L: {
'SC IMAGE IOD': ['Image'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
None: ['Image', 'Color Palette', 'Presentation State', 'Dose', 'Segmentation'],
'SEGMENTATION IOD': ['Segmentation'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Image'],
'COLOR PALETTE IOD': ['Color Palette'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
'ENHANCED MR IMAGE IOD': ['Image'],
'CT IMAGE IOD': ['Image'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Image'],
'NM IMAGE IOD': ['Image'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'US MULTI-FRAME IMAGE IOD': ['Image'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Image'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'RT DOSE IOD': ['Dose'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Image'],
'PET IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'US IMAGE IOD': ['Image'],
'MR IMAGE IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'ENHANCED CT IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Image'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Image'],
'VL MICROSCOPIC IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Image'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Image'],
'RT IMAGE IOD': ['Image'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Image'],
'VL ENDOSCOPIC IMAGE IOD': ['Image'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
},
# ShutterLowerHorizontalEdge
0x00181608L: {
'COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'GRAYSCALE SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
None: ['Image', 'Presentation State'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
},
# GreenPaletteColorLookupTableDescriptor
0x00281102L: {
'SC IMAGE IOD': ['Image'],
'MULTI-FRAME TRUE COLOR SC IMAGE IOD': ['Image'],
None: ['Image', 'Color Palette', 'Presentation State', 'Dose', 'Segmentation'],
'SEGMENTATION IOD': ['Segmentation'],
'ENHANCED ULTRASOUND VOLUME IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 16 BIT IMAGE IOD': ['Image'],
'MULTI-FRAME GRAYSCALE BYTE SC IMAGE IOD': ['Image'],
'ENHANCED X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'DIGITAL X-RAY IMAGE IOD': ['Image'],
'OPHTHALMIC PHOTOGRAPHY 8 BIT IMAGE IOD': ['Image'],
'COLOR PALETTE IOD': ['Color Palette'],
'MULTI-FRAME GRAYSCALE WORD SC IMAGE IOD': ['Image'],
'X-RAY ANGIOGRAPHIC IMAGE IOD': ['Image'],
'INTRAVASCULAR OCT IMAGE IOD': ['Image'],
'ENHANCED MR IMAGE IOD': ['Image'],
'CT IMAGE IOD': ['Image'],
'OPHTHALMIC TOMOGRAPHY IMAGE IOD': ['Image'],
'NM IMAGE IOD': ['Image'],
'BLENDING SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'PSEUDO-COLOR SOFTCOPY PRESENTATION STATE IOD': ['Presentation State'],
'US MULTI-FRAME IMAGE IOD': ['Image'],
'VIDEO ENDOSCOPIC IMAGE IOD': ['Image'],
'VL WHOLE SLIDE MICROSCOPY IOD': ['Image'],
'X-RAY 3D ANGIOGRAPHIC IMAGE IOD': ['Image'],
'RT DOSE IOD': ['Dose'],
'VIDEO MICROSCOPIC IMAGE IOD': ['Image'],
'PET IMAGE IOD': ['Image'],
'DIGITAL MAMMOGRAPHY X-RAY IMAGE IOD': ['Image'],
'US IMAGE IOD': ['Image'],
'MR IMAGE IOD': ['Image'],
'ENHANCED MR COLOR IMAGE IOD': ['Image'],
'ENHANCED CT IMAGE IOD': ['Image'],
'XRF IMAGE IOD': ['Image'],
'VL PHOTOGRAPHIC IMAGE IOD': ['Image'],
'VL SLIDE-COORDINATES MICROSCOPIC IMAGE IOD': ['Image'],
'VL MICROSCOPIC IMAGE IOD': ['Image'],
'DIGITAL INTRA-ORAL X-RAY IMAGE IOD': ['Image'],
'ENHANCED PET IMAGE IOD': ['Image'],
'ENHANCED X-RAY RF IMAGE IOD': ['Image'],
'MULTI-FRAME SINGLE BIT SC IMAGE IOD': ['Image'],
'VIDEO PHOTOGRAPHIC IMAGE IOD': ['Image'],
'RT IMAGE IOD': ['Image'],
'X-RAY 3D CRANIOFACIAL IMAGE IOD': ['Image'],
'VL ENDOSCOPIC IMAGE IOD': ['Image'],
'BREAST TOMOSYNTHESIS IMAGE IOD': ['Image'],
'CR IMAGE IOD': ['Image'],
},
# RecordedSourceSequence
0x30080100L: {
'RT BRACHY TREATMENT RECORD | |
<gh_stars>1-10
""" Contains for for Gaussian process Bayesian optimization """
import logging
import random
import pprint
import heapq
from typing import Union
import numpy as np
import torch
from function_utils import CachedFunction, CachedBatchFunction
from gp import TanimotoGP, batch_predict_mu_var_numpy
from graph_ga.graph_ga import run_ga_maximization
# Module logger with a standard stream handler; the handler is attached only
# once so repeated imports do not duplicate log lines.
logger = logging.getLogger("gp_bo")
if len(logger.handlers) == 0:
    ch = logging.StreamHandler()
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    )
    ch.setFormatter(formatter)
    logger.addHandler(ch)
# Optimize acquisition function with genetic algorithm
def maximize_acquisition_func_ga(
    gp_model: TanimotoGP,
    acq_func_np: callable,
    starting_smiles: list,
    smiles_to_np_fingerprint: callable,
    **ga_kwargs,
):
    """Maximize an acquisition function over SMILES with a genetic algorithm.

    Args:
        gp_model: trained Tanimoto GP surrogate.
        acq_func_np: acquisition function taking (mu, var) numpy arrays.
        starting_smiles: SMILES strings seeding the GA population.
        smiles_to_np_fingerprint: featurizer mapping a SMILES string to a
            numpy fingerprint vector.
        **ga_kwargs: forwarded to run_ga_maximization.

    Returns:
        (smiles_out, acq_out): all SMILES evaluated by the GA and their
        acquisition values, sorted with the highest value first.
    """

    def _acq_func_smiles(smiles_list):
        # Featurize, matching the dtype of the GP's training inputs.
        fingerprints = np.stack([smiles_to_np_fingerprint(s) for s in smiles_list])
        train_dtype = gp_model.train_inputs[0].dtype
        if train_dtype == torch.float32:
            fingerprints = fingerprints.astype(np.float32)
        elif train_dtype == torch.float64:
            fingerprints = fingerprints.astype(np.float64)
        else:
            raise ValueError(gp_model.train_inputs[0].dtype)
        mu_pred, var_pred = batch_predict_mu_var_numpy(
            gp_model, torch.as_tensor(fingerprints), batch_size=2 ** 15
        )
        return [float(v) for v in acq_func_np(mu_pred, var_pred)]

    cached_acq_function = CachedBatchFunction(_acq_func_smiles)

    # Run the GA to (approximately) maximize the cached acquisition function.
    _, smiles_2_acq_dict, _ = run_ga_maximization(
        starting_population_smiles=list(starting_smiles),
        scoring_function=cached_acq_function,
        **ga_kwargs,
    )

    # Rank every evaluated SMILES, highest acquisition value first.
    ranked = sorted(smiles_2_acq_dict.items(), key=lambda item: item[1], reverse=True)
    smiles_out = [smi for smi, _ in ranked]
    acq_out = [val for _, val in ranked]
    return smiles_out, acq_out
# Whole GP BO loop
def gp_bo_loop(
gp_model: TanimotoGP,
scoring_function: Union[callable, CachedFunction],
smiles_to_np_fingerprint: callable,
acq_func_of_time: callable,
max_bo_iter: int,
bo_batch_size: int = 1,
y_transform: callable = None,
gp_train_smiles: list = None,
smiles_pool: list = None,
max_func_calls: int = None,
ga_pool_num_best: int = 250,
ga_pool_num_carryover: int = 250, # number of SMILES with high acq funcs to carry over from last time
max_ga_start_population_size: int = 1000,
ga_population_size: int = 500,
ga_max_generations: int = 25,
ga_offspring_size: int = 1000,
ga_mutation_rate: float = 1e-2,
ga_num_cpu: int = 1,
refit_gp_func: callable = None,
n_top_log: int = 10, # When I log "topN" of something, what should N be?
log_ga_smiles: bool = False, # whether to log all SMILES evaluated with GA.
numpy_dtype=np.float32, # numpy dtype to be using
):
logger.info("Starting GP BO")
# Create the cached function
if not isinstance(scoring_function, CachedFunction):
scoring_function = CachedFunction(scoring_function, transform=y_transform)
start_cache = dict(scoring_function.cache)
start_cache_size = len(start_cache)
logger.debug(f"Starting cache made, has size {start_cache_size}")
logger.info(
f"Top {n_top_log} known starting scores:\n"
+ ", ".join(
f"#{i+1}={v:.3f}"
for i, v in enumerate(
heapq.nlargest(
n_top_log, scoring_function(list(start_cache.keys()), batch=True)
)
)
)
)
# Set up which SMILES the GP should be trained on
# If not given, it is assumed that the GP is trained on all known smiles
if gp_train_smiles is None:
logger.debug(
"No GP training SMILES given. "
f"Will default to training on the {start_cache_size} SMILES with known scores."
)
gp_train_smiles_set = set(start_cache.keys())
else:
gp_train_smiles_set = set(gp_train_smiles)
del gp_train_smiles # should refer to new variables later on; don't want to use by mistake
# Keep a pool of all SMILES encountered (used for seeding GA)
if smiles_pool is None:
smiles_pool = set()
else:
smiles_pool = set(smiles_pool)
smiles_pool.update(start_cache.keys())
smiles_pool.update(gp_train_smiles_set)
logger.debug(f"SMILES pool created, size={len(smiles_pool)}")
assert (
len(smiles_pool) > 0
), "No SMILES were provided to the algorithm as training data, known scores, or a SMILES pool."
# Handle edge case of no training data
if len(gp_train_smiles_set) == 0:
logger.warning(
f"No SMILES were provided to train GP. A random one will be chosen from the pool to start training."
)
random_smiles = random.choice(list(smiles_pool))
logger.debug(f"The following SMILES was chosen:\n\t{random_smiles}")
gp_train_smiles_set.add(random_smiles)
del random_smiles
if len(gp_train_smiles_set) > 0:
logger.debug(
f"Plan to condition GP on {len(gp_train_smiles_set)} training points."
)
# Evaluate scores of training data (ideally should all be known)
num_train_data_not_known = len(gp_train_smiles_set - set(start_cache.keys()))
if num_train_data_not_known > 0:
logger.warning(
f"Need to evaluate {num_train_data_not_known} unknown GP training points."
" Probably the training points should have known scores which should be provided."
)
logger.debug("Scoring training points.")
gp_train_smiles_list = list(gp_train_smiles_set)
gp_train_smiles_scores = scoring_function(gp_train_smiles_list, batch=True)
logger.debug("Scoring of training points done.")
# Store GP training data
x_train_np = np.stack(
list(map(smiles_to_np_fingerprint, gp_train_smiles_list))
).astype(numpy_dtype)
y_train_np = np.array(gp_train_smiles_scores).astype(numpy_dtype)
gp_model.set_train_data(
inputs=torch.as_tensor(x_train_np),
targets=torch.as_tensor(y_train_np),
strict=False,
)
logger.debug("Created initial GP training data")
# State variables for BO loop
carryover_smiles_pool = set()
bo_query_res = list()
bo_state_dict = dict(
gp_model=gp_model,
gp_train_smiles_list=gp_train_smiles_list,
bo_query_res=bo_query_res,
scoring_function=scoring_function,
)
# Possibly re-fit GP hyperparameters
if refit_gp_func is not None:
logger.info("Initial fitting of GP hyperparameters")
refit_gp_func(bo_iter=0, gp_model=gp_model, bo_state_dict=bo_state_dict)
# Actual BO loop
for bo_iter in range(1, max_bo_iter + 1):
logger.info(f"Start iter {bo_iter}")
# Make starting population for GA from a combination of
# 1) best `ga_pool_num_best` known scores
# 2) Up to `ga_pool_num_carryover` promising SMILES from last iteration
# 3) Random smiles from `smiles_pool` to pad the pool
top_smiles_at_bo_iter_start = [
s
for _, s in heapq.nlargest(
ga_pool_num_best,
[
(scoring_function(smiles), smiles)
for smiles in scoring_function.cache.keys()
],
)
]
ga_start_smiles = set(top_smiles_at_bo_iter_start) # start with best
ga_start_smiles.update(carryover_smiles_pool) # add carryover
if len(ga_start_smiles) < max_ga_start_population_size:
samples_from_pool = random.sample(
smiles_pool, min(len(smiles_pool), max_ga_start_population_size)
)
# Pad with random SMILES until full
for s in samples_from_pool:
ga_start_smiles.add(s)
if len(ga_start_smiles) >= max_ga_start_population_size:
break
del samples_from_pool
# Current acquisition function
curr_acq_func = acq_func_of_time(bo_iter, bo_state_dict)
# Optimize acquisition function
logger.debug(
f"Maximizing acqusition function with {len(ga_start_smiles)} starting SMILES."
)
acq_smiles, acq_vals = maximize_acquisition_func_ga(
gp_model=gp_model,
acq_func_np=curr_acq_func,
starting_smiles=list(ga_start_smiles),
smiles_to_np_fingerprint=smiles_to_np_fingerprint,
max_generations=ga_max_generations,
population_size=ga_population_size,
offspring_size=ga_offspring_size,
mutation_rate=ga_mutation_rate,
num_cpu=ga_num_cpu,
)
logger.debug(f"Acquisition function optimized, {len(acq_smiles)} evaluated.")
_n_top = max(n_top_log, bo_batch_size + 3)
logger.debug(
f"Top {_n_top} acquisition function values: "
+ ", ".join([f"{v:.2f}" for v in acq_vals[:_n_top]])
)
del _n_top
# Now that new SMILES were generated, add them to the pool
_start_size = len(smiles_pool)
smiles_pool.update(acq_smiles)
_end_size = len(smiles_pool)
logger.debug(
f"{_end_size - _start_size} smiles added to pool "
f"(size went from {_start_size} to {_end_size})"
)
del _start_size, _end_size
# Greedily choose SMILES to be in the BO batch
smiles_batch = []
smiles_batch_acq = []
for candidate_smiles, acq in zip(acq_smiles, acq_vals):
if (
candidate_smiles not in gp_train_smiles_set
and candidate_smiles not in smiles_batch
):
smiles_batch.append(candidate_smiles)
smiles_batch_acq.append(acq)
if len(smiles_batch) >= bo_batch_size:
break
del candidate_smiles, acq
logger.debug(f"Batch created, size {len(smiles_batch)}/{bo_batch_size}")
assert (
len(smiles_batch) > 0
), "Empty batch, shouldn't happen. Must be problem with GA."
smiles_batch_np = np.stack(
list(map(smiles_to_np_fingerprint, smiles_batch))
).astype(x_train_np.dtype)
# Get predictions about SMILES batch before training on it
smiles_batch_mu_pre, smiles_batch_var_pre = batch_predict_mu_var_numpy(
gp_model, torch.as_tensor(smiles_batch_np)
)
logger.debug("Made mean/var predictions for new SMILES batch")
# Score these SMILES
logger.debug(
f"Evaluating scoring function on SMILES batch of size {len(smiles_batch)}."
)
smiles_batch_scores = scoring_function(smiles_batch, batch=True)
logger.debug(f"Scoring complete.")
# Add new points to GP training data
gp_train_smiles_list += smiles_batch
gp_train_smiles_set.update(gp_train_smiles_list)
x_train_np = np.concatenate([x_train_np, smiles_batch_np], axis=0)
y_train_np = np.concatenate(
[y_train_np, np.asarray(smiles_batch_scores, dtype=y_train_np.dtype)],
axis=0,
)
gp_model.set_train_data(
inputs=torch.as_tensor(x_train_np),
targets=torch.as_tensor(y_train_np),
strict=False,
)
logger.debug(f"GP training data reset, now of size {len(x_train_np)}")
# Potentially refit GP hyperparameters
if refit_gp_func is not None:
logger.info("Re-fitting GP hyperparameters")
refit_gp_func(
bo_iter=bo_iter, gp_model=gp_model, bo_state_dict=bo_state_dict
)
# Add SMILES with high acquisition function values to the priority pool,
# Since maybe they will have high acquisition function values next time
carryover_smiles_pool = set()
for s in acq_smiles:
if (
len(carryover_smiles_pool) < ga_pool_num_carryover
and s not in gp_train_smiles_set
):
carryover_smiles_pool.add(s)
else:
break
# Get predictions about SMILES batch AFTER training on it
smiles_batch_mu_post1, smiles_batch_var_post1 = batch_predict_mu_var_numpy(
gp_model, torch.as_tensor(smiles_batch_np)
)
# Assemble full batch results
batch_results = []
for i, s in enumerate(smiles_batch):
transformed_score = scoring_function(s, batch=False)
pred_dict = dict(
mu=float(smiles_batch_mu_pre[i]),
std=float(np.sqrt(smiles_batch_var_pre[i])),
acq=smiles_batch_acq[i],
)
pred_dict["pred_error_in_stds"] = (
pred_dict["mu"] - transformed_score
) / pred_dict["std"]
pred_dict_post1 = dict(
mu=float(smiles_batch_mu_post1[i]),
std=float(np.sqrt(smiles_batch_var_post1[i])),
)
res = dict(
bo_iter=bo_iter,
smiles=s,
raw_score=scoring_function.cache[s],
transformed_score=transformed_score,
predictions=pred_dict,
predictions_after_fit=pred_dict_post1,
)
batch_results.append(res)
del pred_dict, pred_dict_post1, res, transformed_score
bo_query_res.extend(batch_results)
logger.debug("Full batch results:\n" + pprint.pformat(batch_results))
# Potentially add GA info to batch
if log_ga_smiles:
batch_results[0]["ga_info"] = dict(
ga_start_smiles=ga_start_smiles,
ga_eval_smiles=acq_smiles,
)
# Log batch information
bo_iter_status_update = f"End of iter {bo_iter}. Status update:"
_batch_argsort = np.argsort(
-np.asarray([float(r["transformed_score"]) for r in batch_results])
)
# bo_iter_status_update += "\n\tBatch scores (raw): "
# bo_iter_status_update += ", ".join([str(r["raw_score"]) for r in batch_results])
bo_iter_status_update += "\n\tBatch scores (transformed): "
bo_iter_status_update += ", ".join(
[str(batch_results[pos]["transformed_score"]) for pos in _batch_argsort]
)
bo_iter_status_update += "\n\tBatch acquisition function values: "
bo_iter_status_update += ", ".join(
f"{smiles_batch_acq[pos]:.2e}" for pos in _batch_argsort
)
bo_iter_status_update += (
"\n\tAcquisition function values of top known smiles : "
)
_acq_val_dict = dict(zip(acq_smiles, acq_vals))
bo_iter_status_update += ", ".join(
f"{_acq_val_dict[s]:.2e}" for s in top_smiles_at_bo_iter_start[:n_top_log]
)
del _acq_val_dict, _batch_argsort
# Overall progress towards optimizing function
new_bo_smiles = [
r["smiles"] for r in bo_query_res if r["smiles"] not in | |
# Repository: armel/RRFDisplay
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
RRFDisplay version Raspberry Pi 3B et Orange Pi Zero
Learn more about RRF on https://f5nlg.wordpress.com
Check video about RRFDisplay on https://www.youtube.com/watch?v=rVW8xczVpEo
73 & 88 de F4HWN Armel
'''
import settings as s
import lib as l
import time
import os
from luma.core.render import canvas
from luma.core import legacy
from PIL import ImageFont
'''
with canvas(s.device, dither=True) as draw:
if s.device.height > 160:
icon = ImageFont.truetype('./fonts/fontello.ttf', 14) # Icon font
font = ImageFont.truetype('./fonts/7x5.ttf', 8) # Text font
else:
icon = ImageFont.truetype('./fonts/fontello.ttf', 14) # Icon font
font = ImageFont.truetype('./fonts/freepixel.ttf', 16) # Text font
font_big = ImageFont.truetype('./fonts/bold.ttf', 30) # Text font
font_tot = ImageFont.truetype('./fonts/rounded_led_board.ttf', 20) # Text font
'''
icon = ImageFont.truetype('./fonts/fontello.ttf', 14) # Icon font
font = ImageFont.truetype('./fonts/7x5.ttf', 8) # Text font
font_big = ImageFont.truetype('./fonts/bold.ttf', 30) # Text font
font_tot = ImageFont.truetype('./fonts/rounded_led_board.ttf', 20) # Text font
# Manage color
def get_color(section, value):
    """Return the color configured for ``(section, value)`` in the theme.

    Named colors present in ``s.color`` are translated to their concrete
    value; any other string (e.g. a raw hex code) is returned verbatim.
    """
    name = s.theme.get(section, value)
    return s.color[name] if name in s.color else name
# Draw title
def title(draw, message, width=0, offset=0):
    """Draw *message* horizontally centered in the header band (y=4).

    width=0 means "use the full device width"; offset shifts the text
    horizontally (used for multi-panel layouts).
    """
    if width == 0:
        width = s.device.width
    text_w, _ = draw.textsize(text=message, font=font)
    x = (width - text_w) / 2 + offset
    draw.text((x, 4), message, font=font, fill=get_color('header', 'foreground'))
# Draw last call
def last(draw, call, width=0, offset=0):
    """Draw the list of last heard call signs.

    Args:
        draw: luma canvas draw object.
        call: iterable of call signs; empty-string entries are skipped.
        width: drawing width in pixels (0 means full device width).
        offset: horizontal offset in pixels.
    """
    if width == 0:
        width = s.device.width
    # Print last_call
    i = 16
    j = 1
    for c in call:
        # BUGFIX: was `c is not ''` — identity comparison against a string
        # literal is unreliable and a SyntaxWarning on Python >= 3.8.
        if c != '':
            w, h = draw.textsize(text=c, font=font)
            tab = (width - w) / 2
            # The first (most recent) entry is highlighted.
            if j == 1:
                color = get_color('log', 'call_last')
            else:
                color = get_color('log', 'call')
            draw.text((tab + offset, i), c, font=font, fill=color)
            legacy.text(draw, (16 + offset, i + 1), chr(s.letter[str(j)]), font=s.SMALL_BITMAP_FONT, fill=color)
            if s.transmit is False:
                k = 108
                # BUGFIX: loop variable renamed from `l`, which shadowed the
                # `lib` module imported as `l` at file level.
                for ch in s.call_time[j - 1][:5]:
                    legacy.text(draw, (k + offset, i + 1), chr(s.letter[ch]), fill=get_color('header', 'foreground'), font=s.SMALL_BITMAP_FONT)
                    k += 4
            i += h
            j += 1
# Draw label
#label(draw, i, 42, get_color('label', 'background'), get_color('label', 'foreground'), s.iptable[j], s.iptable_by[j], 0, offset)
def label(draw, position, width, bg_color, fg_color, label, value, fixed=0, offset=0):
    """Draw a label box with an arrow-like right-hand taper, then its value.

    The taper is four vertical lines of shrinking height after the filled
    rectangle. fixed=0 places the value right after the taper; otherwise
    the value is drawn at the absolute x position `fixed`.
    """
    if s.device.height >= 128:
        position += 3
        draw.rectangle((0 + offset, position - 1, width + offset, position + 8), fill=bg_color)
        taper = [(0, 7), (1, 6), (2, 5), (3, 4)]
    else:
        draw.rectangle((0 + offset, position - 1, width + offset, position + 7), fill=bg_color)
        taper = [(0, 6), (1, 5), (2, 4), (3, 3)]
    # Arrow tip: successive one-pixel columns, each shorter than the last.
    for step, (top, bottom) in enumerate(taper, start=1):
        draw.line((width + offset + step, position + top,
                   width + offset + step, position + bottom), fill=bg_color)
    draw.text((1 + offset, position), label, font=font, fill=fg_color)
    if fixed == 0:
        draw.text((width + offset + 10, position), value, font=font, fill=get_color('screen', 'foreground'))
    else:
        draw.text((fixed + offset, position), value, font=font, fill=get_color('screen', 'foreground'))
# Draw tot
def tot(draw, legacy, duration, position, width=0, offset=0):
    """Draw the talk-time (TOT) indicator.

    On small displays (< 128 px high) a progress bar with a min/max legend
    is drawn; on larger displays the duration is printed as text.

    Args:
        draw: luma canvas draw object.
        legacy: luma.core.legacy module (bitmap text rendering).
        duration: elapsed time in seconds.
        position: baseline y position.
        width: drawing width (0 means full device width).
        offset: horizontal offset in pixels.
    """
    if width == 0:
        width = s.device.width
    #duration += (duration / 60) # Reajust time latence
    if s.device.height < 128:
        j = 54
        k = 11
        duration_min = 0
        timer = [i for i in range(60, 360, 60)]
        for i in timer:
            if duration < i:
                duration_max = i
                break
            else:
                duration_min = i
        else:
            # BUGFIX: when duration >= the last timer step (300 s) the loop
            # completed without setting duration_max, raising NameError.
            # Clamp to the last interval instead.
            duration_min = timer[-2]
            duration_max = timer[-1]
        h = l.interpolation(duration, duration_min, duration_max, 0, 120)
        # Clear then redraw the progress bar as 1-px columns every 2 px.
        draw.rectangle((0, j, 128, j - k), fill=get_color('screen', 'background'))
        for i in range(3, h, 2):
            draw.rectangle((i, j, i, j - k), fill=get_color('screen', 'foreground'))
        # Dotted baseline under the bar.
        for i in range(0, 128, 4):
            draw.line((i, position, i + 1, position), fill=get_color('screen', 'foreground'))
        # Duration min (left legend)
        tmp = list(str(duration_min))
        msg = ''
        for c in tmp:
            msg += chr(s.letter[c])
        legacy.text(draw, (0, position + 2), msg, fill=get_color('screen', 'foreground'), font=s.SMALL_BITMAP_FONT)
        # Duration max (right legend)
        tmp = list(str(duration_max))
        msg = ''
        for c in tmp:
            msg += chr(s.letter[c])
        if duration_max < 100:
            tab = 4
        else:
            tab = 0
        legacy.text(draw, (115 + tab, position + 2), msg, fill=get_color('screen', 'foreground'), font=s.SMALL_BITMAP_FONT)
        # Current duration (center legend)
        tmp = list(str(duration))
        msg = ''
        for c in tmp:
            msg += chr(s.letter[c])
        if duration < 10:
            tab = 2
        else:
            tab = 0
        legacy.text(draw, (60 + tab, position + 2), msg, fill=get_color('screen', 'foreground'), font=s.SMALL_BITMAP_FONT)
    else:
        # Large display: render the duration as hh:mm:ss-style text.
        tmp = l.convert_second_to_time(duration)
        w, h = draw.textsize(text=tmp, font=font_tot)
        tab = (width - w) / 2
        draw.text((tab + offset, 57), tmp, font=font_tot, fill=get_color('screen', 'foreground'))
# Print elsewhere
def elsewhere(draw, data, offset=0):
    """Draw the 'elsewhere' activity table (room / duration / info columns).

    Each entry of *data* is a '/'-separated record; a leading '00:00'
    timestamp marks an inactive room.
    """
    border = get_color('elsewhere', 'border')
    draw.rectangle((0 + offset, 77, 127 + offset, 127), outline=border, fill=get_color('elsewhere', 'background'))
    # Horizontal row separators.
    for y in [87, 97, 107, 117]:
        draw.line((0 + offset, y, 127 + offset, y), fill=border)
    row_y = 79
    for entry in data:
        fields = entry.split('/')
        if fields[0] == '00:00':
            # Inactive room: plain background; long 'h' format is drawn
            # further left so it fits.
            draw.rectangle((21 + offset, row_y - 1, 126 + offset, row_y + 7), fill=get_color('elsewhere', 'background'))
            x = 28 if 'h' in fields[2] else 48
            draw.text((x + offset, row_y), fields[2], font=font, fill=get_color('elsewhere', 'foreground'))
            draw.text((100 + offset, row_y), fields[3], font=font, fill=get_color('elsewhere', 'foreground'))
        else:
            draw.rectangle((21 + offset, row_y - 1, 126 + offset, row_y + 7), fill=get_color('elsewhere', 'background_active'))
            draw.text((28 + offset, row_y), fields[2], font=font, fill=get_color('elsewhere', 'foreground_active'))
            draw.text((100 + offset, row_y), fields[3], font=font, fill=get_color('elsewhere', 'foreground_active'))
        # Left-hand room-name column, always highlighted.
        draw.rectangle((1 + offset, row_y - 1, 19 + offset, row_y + 7), fill=get_color('elsewhere', 'background_active'))
        draw.text((2 + offset, row_y), fields[1], font=font, fill=get_color('elsewhere', 'foreground_active'))
        row_y += 10
    # Vertical column separators.
    draw.line((20 + offset, 77, 20 + offset, 127), fill=border)
    draw.line((94 + offset, 77, 94 + offset, 127), fill=border)
# Print whois
def whois(draw, offset=0):
    """Draw the 'whois' information panel for the current call."""
    border = get_color('whois', 'border')
    fg = get_color('whois', 'foreground')
    fg_active = get_color('whois', 'foreground_active')
    draw.rectangle((0 + offset, 77, 127 + offset, 127), outline=border, fill=get_color('whois', 'background'))
    draw.rectangle((1 + offset, 78, 47 + offset, 126), fill=get_color('whois', 'background_active'))
    # Vertical separator between label and value columns.
    draw.line((48 + offset, 77, 48 + offset, 127), fill=border)
    # Horizontal row separators.
    for y in [87, 97, 107, 117]:
        draw.line((0 + offset, y, 127 + offset, y), fill=border)
    # Truncate long descriptions to keep the column aligned.
    description = s.call_description
    if len(description) > 14:
        description = description[:14] + '...'
    rows = [
        ('Type', s.call_type),
        ('Detail', description),
        ('Tone', s.call_tone),
        ('Locator', s.call_locator),
    ]
    # Last row: show the first name only when no sysop is known.
    if s.call_sysop == '' and s.call_prenom != '':
        rows.append(('Prenom', s.call_prenom))
    else:
        rows.append(('Sysop', s.call_sysop))
    y = 79
    for name, value in rows:
        draw.text((2 + offset, y), name, font=font, fill=fg_active)
        draw.text((50 + offset, y), value, font=font, fill=fg)
        y += 10
# Draw histogram
def histogram(draw, legacy, position, height=15, offset=0):
    """Draw the per-hour QSO histogram with an hour legend underneath.

    Args:
        draw: luma canvas draw object.
        legacy: luma.core.legacy module (bitmap text rendering).
        position: baseline y position of the bars.
        height: maximum bar height in pixels.
        offset: horizontal offset in pixels.
    """
    qso_hour_max = max(s.qso_hour)
    x = 5
    # One 3-px-wide bar per hour, scaled to the busiest hour.
    for hour, count in enumerate(s.qso_hour):
        if count != 0:
            bar = l.interpolation(count, 0, qso_hour_max, 0, height)
        else:
            bar = 0
        # Clear the column, then draw the bar; the current hour is highlighted.
        draw.rectangle((0 + offset + x, position, x + offset + 2, (position - height)), fill=get_color('screen', 'background'))
        if hour == s.hour:
            color = get_color('histogram', 'column_current')
        else:
            color = get_color('histogram', 'column')
        draw.rectangle((0 + offset + x, position, x + offset + 2, (position - bar)), fill=color)
        x += 5
    # NOTE: removed dead variable `j` (initialized/incremented, never read).
    # Legend: bitmap digit pairs under the axis (hour marks).
    for x, d1, d2 in [(1, 0, 0), (33, 0, 6), (63, 1, 2), (93, 1, 8), (120, 2, 3)]:
        legacy.text(draw, (x + offset, position + 2), chr(d1) + chr(d2), fill=get_color('histogram', 'legend'), font=s.SMALL_BITMAP_FONT)
# Print clock and room
def clock_room(draw, offset=0):
    """Alternately draw the room name and the clock in the header.

    Four seconds out of five show the current room (3 chars); every fifth
    second shows the clock (HH:MM), using the bitmap letter font.
    """
    y = 5
    if s.seconde % 5 != 0:
        x, text = 116, s.room_current[:3]
    else:
        x, text = 108, s.now[:5]
    for c in text:
        legacy.text(draw, (x + offset, y), chr(s.letter[c]), fill=get_color('header', 'foreground'), font=s.SMALL_BITMAP_FONT)
        x += 4
# Print distance
def distance(draw, | |
np.cos(drive_cell[t - 1]))) * ((1.0 - s_de[:, t - 1]) / self.tau_R))
s_db[:, t] = s_db[:, t - 1] + self.dt * (-1.0 * (s_db[:, t - 1] / self.tau_ex) + np.exp(
-1.0 * self.eta * (1 + np.cos(drive_cell[t - 1]))) * ((1.0 - s_db[:, t - 1]) / self.tau_R))
s_dc[:, t] = s_dc[:, t - 1] + self.dt * (-1.0 * (s_dc[:, t - 1] / self.tau_ex) + np.exp(
-1.0 * self.eta * (1 + np.cos(drive_cell[t - 1]))) * ((1.0 - s_dc[:, t - 1]) / self.tau_R))
# calculate total synaptic input
S_ex[:, t] = self.g_ee * np.sum(s_ee[:, :, t - 1], axis=0) - self.g_be * np.sum(s_be[:, :, t - 1],
axis=0) - self.g_ce * np.sum(
s_ce[:, :, t - 1], axis=0) + self.g_de * s_de[:, t - 1]
S_bask[:, t] = self.g_eb * np.sum(s_eb[:, :, t - 1], axis=0) - self.g_bb * np.sum(s_bb[:, :, t - 1],
axis=0) + self.g_db * s_db[
:,
t - 1]
S_chand[:, t] = self.g_ec * np.sum(s_ec[:, :, t - 1], axis=0) - self.g_cc * np.sum(s_cc[:, :, t - 1],
axis=0) - self.g_bc * np.sum(
s_bc[:, :, t - 1], axis=0) + self.g_dc * s_dc[:, t - 1]
meg[:, t] = self.g_ee * np.sum(s_ee[:, :, t - 1], axis=0) # + self.g_de*s_de[:,t-1]
# evolve drive cell
drive_cell[t] = drive_cell[t - 1] + self.dt * (
(1 - np.cos(drive_cell[t - 1])) + b_drive * (1 + np.cos(drive_cell[t - 1])))
# evolve theta
theta_ex[:, t] = theta_ex[:, t - 1] + self.dt * (
(1 - np.cos(theta_ex[:, t - 1])) + (B_ex + S_ex[:, t] + N_ex[:, t]) * (
1 + np.cos(theta_ex[:, t - 1])))
theta_bask[:, t] = theta_bask[:, t - 1] + self.dt * (
(1 - np.cos(theta_bask[:, t - 1])) + (B_bask + S_bask[:, t] + N_bask[:, t]) * (
1 + np.cos(theta_bask[:, t - 1])))
theta_chand[:, t] = theta_chand[:, t - 1] + self.dt * (
(1 - np.cos(theta_chand[:, t - 1])) + (B_chand + S_chand[:, t] + N_chand[:, t]) * (
1 + np.cos(theta_chand[:, t - 1])))
# Sum EPSCs of excitatory cells
MEG = np.sum(meg, axis=0)
if saveMEG:
filenameMEG = self.directory + self.filename + '-MEG.npy'
np.save(filenameMEG, MEG)
if saveEX:
filenameEX = self.directory + self.filename + '-Ex.npy'
np.save(filenameEX, theta_ex)
if saveBASK:
filenameBASK = self.directory + self.filename + '-Bask.npy'
np.save(filenameBASK, theta_bask)
if saveCHAND:
filenameCHAND = self.directory + self.filename + '-Chand.npy'
np.save(filenameCHAND, theta_chand)
return MEG, theta_ex, theta_bask, theta_chand
def plotTrace(self, trace, sim_time, save):
"""
Plots a trace signal versus time
Parameters:
trace: the trace signal to plot
sim_time: the duration of the simulation
"""
fig = plt.figure()
ax = fig.add_subplot(111)
time = np.linspace(0, sim_time, int(sim_time / self.dt) + 1)
ax.plot(time, trace, 'k')
# plt.show()
def plotMEG(self, MEG, sim_time, save):
"""
Plots a simulated MEG signal versus time
Parameters:
MEG: the simulated MEG signal to plot
sim_time: the duration of the simulation
"""
fig = plt.figure()
ax = fig.add_subplot(111)
time = np.linspace(0, sim_time, int(sim_time / self.dt) + 1)
ax.plot(time, MEG, 'k')
if save:
filenamepng = self.directory + self.filename + '-MEG.png'
# print filenamepng
plt.savefig(filenamepng, dpi=600)
# plt.show()
def rasterPlot(self, data, sim_time, save, name):
"""
Plots a raster plot for an array of spike trains
Parameters:
data: array of spike trains
sim_time: duration of the simulation
"""
spiketrains = self._getSpikeTimes(data)
fig = plt.figure()
ax = fig.add_subplot(111)
for i, times in enumerate(spiketrains):
y = [i] * len(times)
ax.plot(times, y, linestyle='None', color='k', marker='|', markersize=10)
ax.axis([0, sim_time, -0.5, len(spiketrains)])
if save:
filenamepng = self.directory + self.filename + '-' + name + '-raster.png'
# print filenamepng
plt.savefig(filenamepng, dpi=600)
# plt.show()
def calculatePSD(self, meg, sim_time):
"""
Calculates the power spectral density of a simulated MEG signal
Parameters:
meg: the simulated MEG signal
sim_time: the duration of the simulation
"""
# fourier sample rate
fs = 1. / self.dt
tn = np.linspace(0, sim_time, int(sim_time / self.dt) + 1)
npts = len(meg)
startpt = int(0.2 * fs)
if (npts - startpt) % 2 != 0:
startpt = startpt + 1
meg = meg[startpt:]
tn = tn[startpt:]
nfft = len(tn)
pxx, freqs = mlab.psd(meg, NFFT=nfft, Fs=fs, noverlap=0, window=mlab.window_none)
pxx[0] = 0.0
return pxx, freqs
def plotPSD(self, freqs, psd, fmax, save):
"""
Plots the power spectral density of a simulated MEG signal
Parameters:
freqs: frequency vector
psd: power spectral density vector
fmax: maximum frequency to display
"""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(freqs, psd)
ax.axis(xmin=0, xmax=fmax)
if save:
filenamepng = self.directory + self.filename + '-PSD.png'
# print filenamepng
plt.savefig(filenamepng, dpi=600)
return ax
def _getSingleSpikeTimes(self, neuron):
"""
Calculates the spike times from the trace of a single theta neuron
Parameters:
neuron: the single neuron trace
"""
spike_times = []
old = 0.0
for i, n in enumerate(neuron):
# if theta passes (2l-1)*pi, l integer, with dtheta/dt>0 then the neuron spikes (see Boergers and Kopell, 2003)
if (n % (2 * np.pi)) > np.pi and (old % (2 * np.pi)) < np.pi:
spike_time = i * self.dt
spike_times.append(spike_time)
old = n
return spike_times
def _getSpikeTimes(self, data):
'''
Calculates the spike times from an array of theta neuron traces
Parameters:
data: the traces array
'''
nx, ny = data.shape
spike_times_array = [None] * nx
for i in range(nx):
spike_times_array[i] = self._getSingleSpikeTimes(data[i, :])
return spike_times_array
def _noise(self, t, tn):
t = t * self.dt
if t - tn > 0:
value = (self.A * (np.exp(-(t - tn) / self.tau_ex) - np.exp(-(t - tn) / self.tau_R))) / (
self.tau_ex - self.tau_R)
else:
value = 0
return value
class ChandelierSimpleModel(sciunit.Model, ProduceXY):
    """The extended simple chandelier model from Vierling-Claassen et al. (2008) """

    def __init__(
        self,
        controlparams,
        schizparams,
        seed=12345,
        time=500,
        name="ChandelierSimpleModel",
    ):
        """Store simulation parameters and register them with sciunit."""
        self.controlparams = controlparams
        self.schizparams = schizparams
        self.time = time
        self.name = name
        self.seed = seed
        super(ChandelierSimpleModel, self).__init__(
            name=name,
            controlparams=controlparams,
            schizparams=schizparams,
            seed=seed,
            time=time,
        )

    def produce_XY(self, stimfrequency=40.0, powerfrequency=40.0):
        """Simulate both networks and return their band power.

        Runs the control and schizophrenia-like networks under
        ``stimfrequency`` drive and sums the PSD over ~3 bins centred on
        ``powerfrequency`` (e.g. 38-42 Hz for the default 40 Hz).
        Returns [controlXY, schizXY].
        """
        lbound = int((powerfrequency / 2) - 1)
        ubound = int((powerfrequency / 2) + 2)
        band_powers = []
        for params, tag in ((self.controlparams, 'Control'),
                            (self.schizparams, 'Schiz')):
            model = SimpleModelExtended(params)
            print(tag + " model created")
            meg, _, _, _ = model.run(
                stimfrequency, self.seed, self.time, 0, 0, 0, 0
            )
            print(tag + " model simulated")
            pxx, freqs = model.calculatePSD(meg, self.time)
            print(tag + " PSD calculated")
            band_powers.append(np.sum(pxx[lbound:ubound]))
        return band_powers
class ChandelierSimpleModelRobust(sciunit.Model, ProduceXY):
"""The extended simple chandelier model from Vierling-Claassen et al. (2008) """
def __init__(self, controlparams, schizparams, seeds, time=500, name=None):
self.controlparams = controlparams
self.schizparams = schizparams
self.time = time
self.name = name
self.seeds = seeds
super(ChandelierSimpleModelRobust, self).__init__(
name=name,
controlparams=controlparams,
schizparams=schizparams,
time=time,
seeds=seeds,
)
def produce_XY(self, stimfrequency=40.0, powerfrequency=40.0):
"""
Simulates Y Hz drive to the control and the schizophrenia-like network for all
random seeds, calculates a Fourier transform of the simulated MEG
and extracts the power in the X Hz frequency band for each simulation.
Returns the mean power for the control and the schizophrenia-like network, respectively.
"""
lbound = (powerfrequency / 2) - 1
ubound = (powerfrequency / 2) + 2
controlXY = np.zeros((len(self.seeds),))
schizXY = np.zeros((len(self.seeds),))
for i, s in enumerate(self.seeds):
print("Seed number:", i)
# generate the control network and run simulation
control_model = SimpleModelExtended(self.controlparams)
print("Control model created")
control_meg, _, _, _ = control_model.run(stimfrequency, s, self.time, 0, 0, 0, 0)
print("Control model simulated")
control_pxx, freqs = control_model.calculatePSD(control_meg, self.time)
print("Control PSD calculated")
controlXY[i] = np.sum(control_pxx[int(lbound):int(ubound)])
# generate the schizophrenia-like network and run simulation
schiz_model = SimpleModelExtended(self.schizparams)
print("Schiz model created")
schiz_meg, _, _, _= schiz_model.run(stimfrequency, s, self.time, 0, 0, 0, 0)
print("Schiz model simulated")
schiz_pxx, freqs = schiz_model.calculatePSD(schiz_meg, self.time)
print("Schiz PSD calculated")
schizXY[i] = np.sum(schiz_pxx[int(lbound):int(ubound)])
mcontrolXY = np.mean(controlXY)
mschizXY = np.mean(schizXY)
return [mcontrolXY, mschizXY]
def produce_XY_plus(self, stimfrequency=40.0, powerfrequency=40.0):
"""
Simulates Y Hz drive to the control and the schizophrenia-like network for
all random | |
+f on the 2-dimensional topological manifold M
sage: g == f
True
"""
result = type(self)(self.parent())
for chart in self._express:
result._express[chart] = + self._express[chart]
if self._name is not None:
result._name = '+' + self._name
if self._latex_name is not None:
result._latex_name = '+' + self._latex_name
return result
def __neg__(self):
r"""
Unary minus operator.
OUTPUT:
- the negative of the scalar field
TESTS::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: f = M.scalar_field({X: x+y}, name='f')
sage: g = f.__neg__(); g
Scalar field -f on the 2-dimensional topological manifold M
sage: g.display()
-f: M --> R
(x, y) |--> -x - y
sage: g.__neg__() == f
True
"""
result = type(self)(self.parent())
for chart in self._express:
result._express[chart] = - self._express[chart]
if self._name is not None:
result._name = '-' + self._name
if self._latex_name is not None:
result._latex_name = '-' + self._latex_name
return result
######### CommutativeAlgebraElement arithmetic operators ########
def _add_(self, other):
r"""
Scalar field addition.
INPUT:
- ``other`` -- a scalar field (in the same algebra as ``self``)
OUTPUT:
- the scalar field resulting from the addition of ``self`` and
``other``
TESTS::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: f = M.scalar_field({X: x+y}, name='f')
sage: g = M.scalar_field({X: x*y}, name='g')
sage: s = f._add_(g); s
Scalar field f+g on the 2-dimensional topological manifold M
sage: s.display()
f+g: M --> R
(x, y) |--> (x + 1)*y + x
sage: s == f+g
True
sage: f._add_(M.zero_scalar_field()) == f
True
"""
# Special cases:
if self._is_zero:
return other
if other._is_zero:
return self
# Generic case:
com_charts = self.common_charts(other)
if com_charts is None:
raise ValueError("no common chart for the addition")
result = type(self)(self.parent())
for chart in com_charts:
# ChartFunction addition:
result._express[chart] = self._express[chart] + other._express[chart]
if self._name is not None and other._name is not None:
result._name = self._name + '+' + other._name
if self._latex_name is not None and other._latex_name is not None:
result._latex_name = self._latex_name + '+' + other._latex_name
return result
def _sub_(self, other):
r"""
Scalar field subtraction.
INPUT:
- ``other`` -- a scalar field (in the same algebra as ``self``)
OUTPUT:
- the scalar field resulting from the subtraction of ``other`` from
``self``
TESTS::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: f = M.scalar_field({X: x+y}, name='f')
sage: g = M.scalar_field({X: x*y}, name='g')
sage: s = f._sub_(g); s
Scalar field f-g on the 2-dimensional topological manifold M
sage: s.display()
f-g: M --> R
(x, y) |--> -(x - 1)*y + x
sage: s == f-g
True
sage: f._sub_(M.zero_scalar_field()) == f
True
"""
# Special cases:
if self._is_zero:
return -other
if other._is_zero:
return self
# Generic case:
com_charts = self.common_charts(other)
if com_charts is None:
raise ValueError("no common chart for the subtraction")
result = type(self)(self.parent())
for chart in com_charts:
# ChartFunction subtraction:
result._express[chart] = self._express[chart] - other._express[chart]
if self._name is not None and other._name is not None:
result._name = self._name + '-' + other._name
if self._latex_name is not None and other._latex_name is not None:
result._latex_name = self._latex_name + '-' + other._latex_name
return result
def _mul_(self, other):
r"""
Scalar field multiplication.
INPUT:
- ``other`` -- a scalar field (in the same algebra as ``self``)
OUTPUT:
- the scalar field resulting from the multiplication of ``self`` by
``other``
TESTS::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: f = M.scalar_field({X: x+y}, name='f')
sage: g = M.scalar_field({X: x*y}, name='g')
sage: s = f._mul_(g); s
Scalar field f*g on the 2-dimensional topological manifold M
sage: s.display()
f*g: M --> R
(x, y) |--> x^2*y + x*y^2
sage: s == f*g
True
sage: f._mul_(M.zero_scalar_field()) == M.zero_scalar_field()
True
sage: f._mul_(M.one_scalar_field()) == f
True
"""
from sage.tensor.modules.format_utilities import (format_mul_txt,
format_mul_latex)
# Special cases:
if self._is_zero or other._is_zero:
return self._domain.zero_scalar_field()
# Generic case:
com_charts = self.common_charts(other)
if com_charts is None:
raise ValueError("no common chart for the multiplication")
result = type(self)(self.parent())
for chart in com_charts:
# ChartFunction multiplication:
result._express[chart] = self._express[chart] * other._express[chart]
result._name = format_mul_txt(self._name, '*', other._name)
result._latex_name = format_mul_latex(self._latex_name, r' \cdot ',
other._latex_name)
return result
def _div_(self, other):
r"""
Scalar field division.
INPUT:
- ``other`` -- a scalar field (in the same algebra as self)
OUTPUT:
- the scalar field resulting from the division of ``self`` by
``other``
TESTS::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: f = M.scalar_field({X: x+y}, name='f')
sage: g = M.scalar_field({X: x*y}, name='g')
sage: s = f._div_(g); s
Scalar field f/g on the 2-dimensional topological manifold M
sage: s.display()
f/g: M --> R
(x, y) |--> (x + y)/(x*y)
sage: s == f/g
True
sage: f._div_(M.zero_scalar_field())
Traceback (most recent call last):
...
ZeroDivisionError: division of a scalar field by zero
"""
from sage.tensor.modules.format_utilities import format_mul_txt, \
format_mul_latex
# Special cases:
if other._is_zero:
raise ZeroDivisionError("division of a scalar field by zero")
if self._is_zero:
return self._domain.zero_scalar_field()
# Generic case:
com_charts = self.common_charts(other)
if com_charts is None:
raise ValueError("no common chart for the division")
result = type(self)(self.parent())
for chart in com_charts:
# ChartFunction division:
result._express[chart] = self._express[chart] / other._express[chart]
result._name = format_mul_txt(self._name, '/', other._name)
result._latex_name = format_mul_latex(self._latex_name, '/',
other._latex_name)
return result
    def _lmul_(self, number):
        r"""
        Scalar multiplication operator: return ``number * self`` or
        ``self * number``.

        This differs from ``_mul_(self, other)`` by the fact that ``number``
        is not assumed to be a scalar field defined on the same domain as
        ``self``, contrary to ``other`` in ``_mul_(self, other)``. In
        practice, ``number`` is a an element of the field on which the
        scalar field algebra is defined.

        INPUT:

        - ``number`` -- an element of the ring on which the scalar field
          algebra is defined; this should be an element of the topological
          field on which the manifold is constructed (possibly represented
          by a symbolic expression)

        OUTPUT:

        - the scalar field ``number * self``

        TESTS::

            sage: M = Manifold(2, 'M', structure='topological')
            sage: X.<x,y> = M.chart()
            sage: f = M.scalar_field({X: x+y}, name='f')
            sage: s = f._lmul_(2); s
            Scalar field on the 2-dimensional topological manifold M
            sage: s.display()
            M --> R
            (x, y) |--> 2*x + 2*y
            sage: s == 2 * f
            True
            sage: s == f * 2
            True
            sage: f._lmul_(pi).display()
            M --> R
            (x, y) |--> pi*(x + y)
            sage: f._lmul_(pi) == pi*f
            True
            sage: f._lmul_(0) == M.zero_scalar_field()
            True
            sage: f._lmul_(1) == f
            True
        """
        # Multiplying by 0 yields the zero element of the algebra.
        if number == 0:
            return self.parent().zero()
        result = type(self)(self.parent())
        if isinstance(number, Expression):
            var = number.variables()  # possible symbolic variables in number
            if var:
                # There are symbolic variables in number
                # Are any of them a chart coordinate ?
                chart_var = False
                for chart in self._express:
                    if any(s in chart[:] for s in var):
                        chart_var = True
                        break
                if chart_var:
                    # Some symbolic variables in number are chart coordinates
                    for chart, expr in self._express.items():
                        # The multiplication is performed only if
                        # either
                        #   (i) all the symbolic variables in number are
                        #       coordinates of this chart
                        # or (ii) no symbolic variable in number belongs to a
                        #         different chart
                        chart_coords = chart[:]
                        var_not_in_chart = [s for s in var
                                            if not s in chart_coords]
                        any_in_other_chart = False
                        if var_not_in_chart != []:
                            # Scan the full atlas for a chart owning one of
                            # the leftover variables; stop at the first hit.
                            for other_chart in self._domain.atlas():
                                other_chart_coords = other_chart[:]
                                for s in var_not_in_chart:
                                    if s in other_chart_coords:
                                        any_in_other_chart = True
                                        break
                                if any_in_other_chart:
                                    break
                        if not any_in_other_chart:
                            result._express[chart] = number * expr
                    # NOTE: the result may carry expressions in only a subset
                    # of self's charts (those compatible with number's
                    # variables).
                    return result
        # General case: the multiplication is performed on all charts:
        for chart, expr in self._express.items():
            result._express[chart] = number * expr
        return result
######### End of CommutativeAlgebraElement arithmetic operators ########
def _function_name(self, func, func_latex, parentheses=True):
r"""
Helper function to set the symbol of a function applied to the
scalar field.
TESTS::
sage: M = Manifold(2, 'M', structure='topological')
sage: X.<x,y> = M.chart()
sage: f = M.scalar_field({X: x+y}, name='f', latex_name=r"\Phi")
sage: f._function_name("cos", r"\cos")
('cos(f)', '\\cos\\left(\\Phi\\right)')
sage: f._function_name("sqrt", r"\sqrt", parentheses=False)
('sqrt(f)', '\\sqrt{\\Phi}')
sage: f = M.scalar_field({X: x+y}) # no name given to f
sage: f._function_name("cos", r"\cos")
(None, None)
"""
if self._name is None:
| |
import numpy as np
def length(x, axis=-1, keepdims=True):
    """Compute the Euclidean norm of a tensor along the given axis(axes).

    :param x: tensor
    :param axis: axis(axes) along which to compute the norm
    :param keepdims: if True, the reduced dimension(s) are kept with size 1
    :return: the length, or a tensor of lengths
    """
    squared_sum = np.sum(x * x, axis=axis, keepdims=keepdims)
    return np.sqrt(squared_sum)
def normalize(x, axis=-1, eps=1e-8):
    """Normalize a tensor along the given axis(axes).

    :param x: data tensor
    :param axis: axis(axes) along which the norm is computed
    :param eps: small constant added to the norm to prevent division by zero
    :return: the normalized tensor
    """
    # Norm computed with keepdims so it broadcasts against x.
    norm = np.sqrt(np.sum(x * x, axis=axis, keepdims=True))
    return x / (norm + eps)
def quat_normalize(x, eps=1e-8):
    """Normalize a quaternion tensor over its last axis.

    :param x: quaternion tensor (..., 4)
    :param eps: small constant preventing division by zero
    :return: tensor of unit quaternions
    """
    norm = np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
    return x / (norm + eps)
def quat_getDif(x, y, eps=1e-8):
    """Compute the normalized difference quaternion between two quaternions.

    :param x: quaternion tensor 1
    :param y: quaternion tensor 2
    :param eps: accepted for API symmetry; note the call below relies on
        quat_normalize's own default epsilon (same value), as before
    :return: the difference quaternion x^-1 * y, normalized
    """
    diff = quat_mul(quat_inv(x), y)
    return quat_normalize(diff)
def angle_axis_to_quat(angle, axis):
    """Convert an angle-axis rotation representation to a quaternion.

    :param angle: tensor of rotation angles (radians)
    :param axis: tensor of rotation axes (last dim 3)
    :return: quaternion tensor (w, x, y, z)
    """
    half_angle = angle / 2.0
    w = np.cos(half_angle)[..., np.newaxis]
    xyz = np.sin(half_angle)[..., np.newaxis] * axis
    return np.concatenate([w, xyz], axis=-1)
def euler_to_quat(e, order='zyx'):
    """Convert Euler angles to quaternions.

    :param e: euler tensor; e[..., i] is the angle about axis order[i]
    :param order: order of the euler rotations (string of 'x'/'y'/'z')
    :return: quaternion tensor
    """
    basis = {
        'x': np.asarray([1, 0, 0], dtype=np.float32),
        'y': np.asarray([0, 1, 0], dtype=np.float32),
        'z': np.asarray([0, 0, 1], dtype=np.float32),
    }
    # One axis-angle quaternion per rotation in the requested order.
    q0, q1, q2 = (angle_axis_to_quat(e[..., i], basis[ax])
                  for i, ax in enumerate(order))
    # Compose right-to-left: q0 * (q1 * q2).
    return quat_mul(q0, quat_mul(q1, q2))
def quat_to_euler(q):
    """
    Converts from a quaternion representation to an euler representation.

    Inverts the intrinsic z-y-x composition produced by
    ``euler_to_quat(e, order='zyx')`` with this file's Hamilton ``quat_mul``.

    :param q: quaternion tensor, (w, x, y, z) in the last axis
    :return: euler tensor in x-y-z order: [roll (x), pitch (y), yaw (z)]
    """
    # roll (rotation about x)
    phi = np.arctan2(2 * (q[..., 0] * q[..., 1] + q[..., 2] * q[..., 3]),
                     1 - 2 * (q[..., 1]**2 + q[..., 2]**2))
    # pitch (rotation about y).
    # BUG FIX: the cross term must be subtracted (w*y - z*x); with '+' the
    # result is wrong whenever roll and yaw interact (e.g. z90 then x90 gave
    # pitch 90 instead of 0). Clip guards arcsin against rounding past +/-1.
    theta = np.arcsin(np.clip(2 * (q[..., 0] * q[..., 2] - q[..., 3] * q[..., 1]), -1.0, 1.0))
    # yaw (rotation about z)
    psi = np.arctan2(2 * (q[..., 0] * q[..., 3] + q[..., 1] * q[..., 2]),
                     1 - 2 * (q[..., 2]**2 + q[..., 3]**2))
    return np.stack([phi, theta, psi], axis=-1)
def quat_inv(q):
    """Invert (conjugate) a tensor of quaternions.

    Negates the vector part; for unit quaternions the conjugate equals
    the inverse.

    :param q: quaternion tensor (..., 4)
    :return: tensor of inverted quaternions
    """
    signs = np.asarray([1, -1, -1, -1], dtype=np.float32)
    return signs * q
def quat_fk(lrot, lpos, parents):
    """Forward kinematics: local rotations/positions to global ones.

    :param lrot: tensor of local quaternions with shape (..., Nb of joints, 4)
    :param lpos: tensor of local positions with shape (..., Nb of joints, 3)
    :param parents: list of parent indices; assumes parents[i] < i with the
        root at index 0
    :return: tuple (global rotations, global positions)
    """
    global_rot = [lrot[..., :1, :]]
    global_pos = [lpos[..., :1, :]]
    # Walk joints root-to-leaf; each parent is resolved before its children.
    for joint in range(1, len(parents)):
        parent_rot = global_rot[parents[joint]]
        global_pos.append(quat_mul_vec(parent_rot, lpos[..., joint:joint + 1, :])
                          + global_pos[parents[joint]])
        global_rot.append(quat_mul(parent_rot, lrot[..., joint:joint + 1, :]))
    return np.concatenate(global_rot, axis=-2), np.concatenate(global_pos, axis=-2)
def quat_ik(grot, gpos, parents):
    """Inverse kinematics: global rotations/positions to local ones.

    :param grot: tensor of global quaternions with shape (..., Nb of joints, 4)
    :param gpos: tensor of global positions with shape (..., Nb of joints, 3)
    :param parents: list of parent indices (root at index 0)
    :return: list [local rotations, local positions]
    """
    # Inverse of each joint's parent rotation (computed once, used twice).
    parent_rot_inv = quat_inv(grot[..., parents[1:], :])
    # The root keeps its global values; every other joint is re-expressed
    # in its parent's frame.
    local_rot = np.concatenate([
        grot[..., :1, :],
        quat_mul(parent_rot_inv, grot[..., 1:, :]),
    ], axis=-2)
    local_pos = np.concatenate([
        gpos[..., :1, :],
        quat_mul_vec(parent_rot_inv,
                     gpos[..., 1:, :] - gpos[..., parents[1:], :]),
    ], axis=-2)
    return [local_rot, local_pos]
def quat_mul(x, y):
    """Hamilton product of two quaternion tensors.

    :param x: tensor of quaternions of shape (..., 4)
    :param y: tensor of quaternions of shape (..., 4)
    :return: the resulting quaternions x * y
    """
    x0, x1, x2, x3 = (x[..., k:k + 1] for k in range(4))
    y0, y1, y2, y3 = (y[..., k:k + 1] for k in range(4))
    return np.concatenate([
        y0 * x0 - y1 * x1 - y2 * x2 - y3 * x3,
        y0 * x1 + y1 * x0 - y2 * x3 + y3 * x2,
        y0 * x2 + y1 * x3 + y2 * x0 - y3 * x1,
        y0 * x3 - y1 * x2 + y2 * x1 + y3 * x0,
    ], axis=-1)
def quat_mul_vec(q, x):
    """Rotate 3D vectors by quaternions.

    Uses the expanded form of q * x * q^-1 with two cross products,
    avoiding full quaternion products.

    :param q: tensor of quaternions of shape (..., 4)
    :param x: tensor of vectors of shape (..., 3)
    :return: the rotated vectors
    """
    qvec = q[..., 1:]
    w = q[..., 0][..., np.newaxis]
    t = 2.0 * np.cross(qvec, x)
    return x + w * t + np.cross(qvec, t)
def quat_slerp(x, y, a):
    """
    Performs spherical linear interpolation (SLERP) between x and y, with proportion a.

    The inputs are never modified: when x and y lie in opposite hemispheres,
    a negated copy of y is interpolated so the rotation takes the short path
    (the original version negated the caller's y array in place).

    :param x: quaternion tensor; batched, i.e. shape (..., 4) with >= 2 dims,
        since boolean masking of the dot products requires an array
    :param y: quaternion tensor, same shape as x
    :param a: indicator (between 0 and 1) of completion of the interpolation;
        scalar or broadcastable tensor
    :return: tensor of interpolation results
    """
    dot = np.sum(x * y, axis=-1)  # renamed from `len`, which shadowed the builtin
    neg = dot < 0.0
    # Flip to the same hemisphere without mutating the inputs.
    dot = np.where(neg, -dot, dot)
    y = np.where(neg[..., np.newaxis], -y, y)
    a = np.zeros_like(x[..., 0]) + a
    amount0 = np.zeros(a.shape)
    amount1 = np.zeros(a.shape)
    # Nearly parallel quaternions: fall back to LERP to avoid dividing
    # by a vanishing sin(omega).
    linear = (1.0 - dot) < 0.01
    omegas = np.arccos(dot[~linear])
    sinoms = np.sin(omegas)
    amount0[linear] = 1.0 - a[linear]
    amount0[~linear] = np.sin((1.0 - a[~linear]) * omegas) / sinoms
    amount1[linear] = a[linear]
    amount1[~linear] = np.sin(a[~linear] * omegas) / sinoms
    return amount0[..., np.newaxis] * x + amount1[..., np.newaxis] * y
def quat_between(x, y):
    """
    Quaternion rotations between two 3D-vector arrays.

    Builds the (unnormalized) quaternion rotating the direction of x onto
    the direction of y: w = |x||y| + x.y, vector part = x cross y.

    :param x: tensor of 3D vectors
    :param y: tensor of 3D vectors
    :return: tensor of (unnormalized) quaternions
    """
    w = (np.sqrt(np.sum(x * x, axis=-1) * np.sum(y * y, axis=-1))
         + np.sum(x * y, axis=-1))[..., np.newaxis]
    # BUG FIX: np.cross takes `axis`, not `dim` (a torch-style keyword that
    # raised TypeError here); the vector components live in the last axis.
    xyz = np.cross(x, y, axis=-1)
    return np.concatenate([w, xyz], axis=-1)
def interpolate_local(lcl_r_mb, lcl_q_mb, n_past, n_future):
    """Interpolate between two keyframes of an animation sequence.

    The keyframes are the last frame of past context and the first frame of
    future context, selected via n_past and n_future.  Root positions are
    LERPed; quaternions are SLERPed (renormalized before and after).

    :param lcl_r_mb: local/global root positions (B, T, 1, 3)
    :param lcl_q_mb: local quaternions (B, T, J, 4)
    :param n_past: number of frames of past context
    :param n_future: number of frames of future context
    :return: tuple (interpolated root positions, interpolated quaternions)
    """
    # Keyframes bracketing the transition.
    root_start = lcl_r_mb[:, n_past - 1, :, :][:, None, :, :]
    root_end = lcl_r_mb[:, -n_future, :, :][:, None, :, :]
    quat_start = lcl_q_mb[:, n_past - 1, :, :]
    quat_end = lcl_q_mb[:, -n_future, :, :]
    # One shared weight ramp covers both interpolations (0 and 1 included).
    n_trans = lcl_r_mb.shape[1] - (n_past + n_future)
    weights = np.linspace(0.0, 1.0, num=n_trans + 2, dtype=np.float32)
    # LERP the root positions.
    inter_lcl_r_mb = (np.tile(root_start, [1, n_trans + 2, 1, 1])
                      + weights[None, :, None, None] * (root_end - root_start))
    # SLERP the quaternions.
    inter_lcl_q_mb = np.stack(
        [quat_normalize(quat_slerp(quat_normalize(quat_start),
                                   quat_normalize(quat_end), w))
         for w in weights],
        axis=1)
    return inter_lcl_r_mb, inter_lcl_q_mb
def remove_quat_discontinuities(rotations):
    """Remove quaternion sign flips along the time dimension (in place).

    q and -q encode the same rotation; for each frame keep whichever
    representative is closer (larger dot product) to the previous frame.

    :param rotations: array of quaternions of shape (T, J, 4); modified
        in place and also returned
    :return: the processed array without quaternion inversions
    """
    negated = -rotations  # snapshot of the original values, as before
    for t in range(1, rotations.shape[0]):
        # True where the negated quaternion is closer to the previous frame.
        flip = (np.sum(rotations[t - 1: t] * rotations[t: t + 1], axis=-1)
                < np.sum(rotations[t - 1: t] * negated[t: t + 1], axis=-1))
        flip = flip[..., np.newaxis]
        rotations[t] = flip * negated[t] + (1.0 - flip) * rotations[t]
    return rotations
# Orient the data according to the last past keyframe
def rotate_at_frame(X, Q, parents, n_past=10):
"""
Re-orients the animation data according to the last frame of past context.
:param X: tensor of local positions of shape (Batchsize, Timesteps, Joints, 3)
:param Q: tensor of local quaternions (Batchsize, Timesteps, Joints, 4)
:param parents: list | |
<reponame>chuckyin/Instruments
# coding=utf-8
# =============================================================================
# Copyright © 2017 FLIR Integrated Imaging Solutions, Inc. All Rights Reserved.
#
# This software is the confidential and proprietary information of FLIR
# Integrated Imaging Solutions, Inc. ("Confidential Information"). You
# shall not disclose such Confidential Information and shall use it only in
# accordance with the terms of the license agreement you entered into
# with FLIR Integrated Imaging Solutions, Inc. (FLIR).
#
# FLIR MAKES NO REPRESENTATIONS OR WARRANTIES ABOUT THE SUITABILITY OF THE
# SOFTWARE, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OR NON-INFRINGEMENT. FLIR SHALL NOT BE LIABLE FOR ANY DAMAGES
# SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR DISTRIBUTING
# THIS SOFTWARE OR ITS DERIVATIVES.
# =============================================================================
#
# ImageFormatControl_QuickSpin.py shows how to apply custom image
# settings to the camera using the QuickSpin API. QuickSpin is a subset of
# the Spinnaker library that allows for simpler node access and control.
#
# This example demonstrates customizing offsets X and Y, width and height,
# and the pixel format. Ensuring custom values fall within an acceptable
# range is also touched on. Retrieving and setting node values using
# QuickSpin is the only portion of the example that differs from
# ImageFormatControl.
#
# A much wider range of topics is covered in the full Spinnaker examples than
# in the QuickSpin ones. There are only enough QuickSpin examples to
# demonstrate node access and to get started with the API; please see full
# Spinnaker examples for further or specific knowledge on a topic.
import PySpin
NUM_IMAGES = 10 # number of images to grab
def configure_custom_image_settings(cam):
    """
    Configures a number of settings on the camera including offsets X and Y,
    width, height, and pixel format. These settings must be applied before
    BeginAcquisition() is called; otherwise, those nodes would be read only.
    Also, it is important to note that settings are applied immediately.
    This means if you plan to reduce the width and move the x offset accordingly,
    you need to apply such changes in the appropriate order.
    :param cam: Camera to configure settings on.
    :type cam: CameraPtr
    :return: True if successful, False otherwise.
    :rtype: bool
    """
    print('\n*** CONFIGURING CUSTOM IMAGE SETTINGS ***\n')

    try:
        result = True

        # Apply mono 8 pixel format
        #
        # *** NOTES ***
        # In QuickSpin, enumeration nodes are as easy to set as other node
        # types. This is because enum values representing each entry node
        # are added to the API.
        if cam.PixelFormat.GetAccessMode() == PySpin.RW:
            cam.PixelFormat.SetValue(PySpin.PixelFormat_Mono8)
            print('Pixel format set to %s...' % cam.PixelFormat.GetCurrentEntry().GetSymbolic())
        else:
            print('Pixel format not available...')
            result = False

        # Apply minimum to offset X
        #
        # *** NOTES ***
        # Numeric nodes have both a minimum and maximum. A minimum is retrieved
        # with the method GetMin(). Sometimes it can be important to check
        # minimums to ensure that your desired value is within range.
        if cam.OffsetX.GetAccessMode() == PySpin.RW:
            cam.OffsetX.SetValue(cam.OffsetX.GetMin())
            print('Offset X set to %d...' % cam.OffsetX.GetValue())
        else:
            print('Offset X not available...')
            result = False

        # Apply minimum to offset Y
        #
        # *** NOTES ***
        # It is often desirable to check the increment as well. The increment
        # is a number of which a desired value must be a multiple. Certain
        # nodes, such as those corresponding to offsets X and Y, have an
        # increment of 1, which basically means that any value within range
        # is appropriate. The increment is retrieved with the method GetInc().
        if cam.OffsetY.GetAccessMode() == PySpin.RW:
            cam.OffsetY.SetValue(cam.OffsetY.GetMin())
            print('Offset Y set to %d...' % cam.OffsetY.GetValue())
        else:
            print('Offset Y not available...')
            result = False

        # Set maximum width
        #
        # *** NOTES ***
        # Other nodes, such as those corresponding to image width and height,
        # might have an increment other than 1. In these cases, it can be
        # important to check that the desired value is a multiple of the
        # increment.
        #
        # BUG FIX: the original tested `cam.Width.GetMax != 0`, which compares
        # the bound method object (always truthy) instead of calling it; the
        # method must be invoked to obtain the maximum value.
        if cam.Width.GetAccessMode() == PySpin.RW and cam.Width.GetInc() != 0 and cam.Width.GetMax() != 0:
            cam.Width.SetValue(cam.Width.GetMax())
            print('Width set to %i...' % cam.Width.GetValue())
        else:
            print('Width not available...')
            result = False

        # Set maximum height
        #
        # *** NOTES ***
        # A maximum is retrieved with the method GetMax(). A node's minimum and
        # maximum should always be a multiple of its increment.
        # (Same GetMax() call fix as for width above.)
        if cam.Height.GetAccessMode() == PySpin.RW and cam.Height.GetInc() != 0 and cam.Height.GetMax() != 0:
            cam.Height.SetValue(cam.Height.GetMax())
            print('Height set to %i...' % cam.Height.GetValue())
        else:
            print('Height not available...')
            result = False

    except PySpin.SpinnakerException as ex:
        print('Error: %s' % ex)
        return False

    return result
def print_device_info(cam):
    """
    This function prints the device information of the camera from the transport
    layer; please see NodeMapInfo example for more in-depth comments on printing
    device information from the nodemap.
    :param cam: Camera to get device information from.
    :type cam: CameraPtr
    :return: True if successful, False otherwise.
    :rtype: bool
    """
    print('\n*** DEVICE INFORMATION ***\n')

    try:
        result = True
        nodemap = cam.GetTLDeviceNodeMap()
        node_device_information = PySpin.CCategoryPtr(nodemap.GetNode('DeviceInformation'))
        if PySpin.IsAvailable(node_device_information) and PySpin.IsReadable(node_device_information):
            features = node_device_information.GetFeatures()
            for feature in features:
                node_feature = PySpin.CValuePtr(feature)
                print('%s: %s' % (node_feature.GetName(),
                                  node_feature.ToString() if PySpin.IsReadable(node_feature) else 'Node not readable'))
        else:
            print('Device control information not available.')
    except PySpin.SpinnakerException as ex:
        # CONSISTENCY FIX: format the exception itself rather than `ex.message`
        # (an attribute removed in Python 3; every other handler in this file
        # already uses `% ex`).
        print('Error: %s' % ex)
        return False
    return result
def acquire_images(cam):
    """
    This function acquires and saves 10 images from a device; please see
    Acquisition example for more in-depth comments on the acquisition of images.
    :param cam: Camera to acquire images from.
    :type cam: CameraPtr
    :return: True if successful, False otherwise.
    :rtype: bool
    """
    print('\n*** IMAGE ACQUISITION ***\n')

    try:
        result = True

        # Set acquisition mode to continuous
        if cam.AcquisitionMode.GetAccessMode() != PySpin.RW:
            print('Unable to set acquisition mode to continuous. Aborting...')
            return False
        cam.AcquisitionMode.SetValue(PySpin.AcquisitionMode_Continuous)
        print('Acquisition mode set to continuous...')

        # Begin acquiring images
        cam.BeginAcquisition()
        print('Acquiring images...')

        # Get device serial number for filename
        device_serial_number = ''
        if cam.TLDevice.DeviceSerialNumber is not None and cam.TLDevice.DeviceSerialNumber.GetAccessMode() == PySpin.RO:
            device_serial_number = cam.TLDevice.DeviceSerialNumber.GetValue()
            print('Device serial number retrieved as %s...' % device_serial_number)

        # Retrieve, convert, and save images
        for i in range(NUM_IMAGES):
            try:
                # Retrieve next received image and ensure image completion
                image_result = cam.GetNextImage()
                if image_result.IsIncomplete():
                    print('Image incomplete with image status %d...' % image_result.GetImageStatus())
                    # BUG FIX: release incomplete images too; the original
                    # returned the buffer only on the success path, leaking
                    # a buffer per incomplete frame.
                    image_result.Release()
                else:
                    # Print image information
                    width = image_result.GetWidth()
                    height = image_result.GetHeight()
                    print('Grabbed Image %d, width = %d, height = %d' % (i, width, height))
                    # Convert image to Mono8
                    image_converted = image_result.Convert(PySpin.PixelFormat_Mono8)
                    # Create a unique filename
                    if device_serial_number:
                        filename = 'ImageFormatControlQS-%s-%d.jpg' % (device_serial_number, i)
                    else:
                        filename = 'ImageFormatControlQS-%d.jpg' % i
                    # Save image
                    image_converted.Save(filename)
                    print('Image saved at %s' % filename)
                    # Release image
                    image_result.Release()
            except PySpin.SpinnakerException as ex:
                print('Error: %s' % ex)
                result = False

        # End acquisition
        cam.EndAcquisition()
    except PySpin.SpinnakerException as ex:
        print('Error: %s' % ex)
        result = False
    return result
def run_single_camera(cam):
    """
    This function acts as the body of the example; please see NodeMapInfo_QuickSpin example for more
    in-depth comments on setting up cameras.
    :param cam: Camera to run example on.
    :type cam: CameraPtr
    :return: True if successful, False otherwise.
    :rtype: bool
    """
    try:
        # Initialize camera
        cam.Init()
        # Print device info
        result = print_device_info(cam)
        # Configure image settings
        if not configure_custom_image_settings(cam):
            # BUG FIX: the original early return left the camera initialized;
            # deinitialize before bailing out.
            cam.DeInit()
            return False
        # Acquire images
        result &= acquire_images(cam)
        # Deinitialize camera
        cam.DeInit()
        return result
    except PySpin.SpinnakerException as ex:
        print('Error: %s' % ex)
        return False
def main():
"""
Example entry point; please see Enumeration_QuickSpin example for more
in-depth comments on preparing and cleaning up the system.
:return: True if successful, False otherwise.
:rtype: bool
"""
result = True
# Retrieve singleton reference to system object
system = PySpin.System.GetInstance()
# Get current library version
version = system.GetLibraryVersion()
print 'Library version: %d.%d.%d.%d' % (version.major, version.minor, version.type, version.build)
# Retrieve list of cameras from | |
else:
self.largest_mcs_mmp_result[(molid_L, molid_R)] = [
(self.ref_smi_props[ctx_id], ), (ctx_id, ), frag_L_id, frag_R_id]
# now build the final results on the fly
# double - for each one we compare against what we already have in self.largest_mcs_mmp_result
ctx_natoms = None
if cut_type_id >= 2:
for molid_L, molid_R, ctx1_id, ctx2_id, frag_L_id, frag_R_id in \
self.iterator_double_pairs_dict_numeric(inc_attachpt=False):
#
if ctx1_id in self.ref_smi_props:
ctx_natoms = (self.ref_smi_props[ctx1_id], )
else:
ctx1_smi = self.refsmi_dict[ctx1_id]
ctx1_smi = ctx1_smi.replace("[1", "[9")
ctx1_smi = ctx1_smi.replace("[2", "[1")
ctx1_smi = ctx1_smi.replace("[9", "[2")
try:
ctx_natoms = (self.ref_smi_props[self.refsmi_dict[ctx1_smi]], )
except:
print("ERR >>>")
print(("{} {} {} {} {} {}".format(molid_L, molid_R, ctx1_id, ctx2_id, frag_L_id, frag_R_id)))
print(("{} {} {}".format(ctx1_id, ctx1_smi, self.refsmi_dict[ctx1_smi])))
print("")
if ctx2_id in self.ref_smi_props:
ctx_natoms = ctx_natoms + (self.ref_smi_props[ctx2_id], )
else:
ctx2_smi = self.refsmi_dict[ctx2_id]
ctx2_smi = ctx2_smi.replace("[1", "[9")
ctx2_smi = ctx2_smi.replace("[2", "[1")
ctx2_smi = ctx2_smi.replace("[9", "[2")
ctx_natoms = ctx_natoms + (self.ref_smi_props[self.refsmi_dict[ctx2_smi]], )
# If the indicator flag check_all_context is set to true we need to pre-filter all ctx fragments
# to ensure they are greater than or equal to the specified limit for mdc_atm_hard (maximum double
# cut atoms hard limit). This is a crude filter and could remove valid double cut MCSS.
if mdc_atm_hard is not None:
if ctx_natoms[0] <= mdc_atm_hard:
continue
elif ctx_natoms[1] <= mdc_atm_hard:
continue
#
# Main
# have we seen this smi - smi pair before?
if (molid_L, molid_R) in self.largest_mcs_mmp_result:
# get the number of atoms in the context
num_atoms_existing = self.largest_mcs_mmp_result[(molid_L, molid_R)][0]
if len(num_atoms_existing) > 1:
total_num_atoms_existing = sum(num_atoms_existing)
else:
total_num_atoms_existing = num_atoms_existing[0]
total_num_atoms_new = sum(ctx_natoms)
if total_num_atoms_new > total_num_atoms_existing:
# if it is a double and we have a min fragment setting
if mdc_atm_soft is not None:
# if it falls below the threshold at which we apply this min frag setting
if total_num_atoms_new <= (total_num_atoms_existing + mdc_atm_soft_threshold):
# only keep if both frag sizes are legal
if '[1' in self.refsmi_dict[ctx1_id]:
if (ctx_natoms[0] > mdc_atm_soft) and (ctx_natoms[1] > mdc_atm_soft):
self.largest_mcs_mmp_result[(molid_L, molid_R)] = \
[ctx_natoms, (ctx1_id, ctx2_id), frag_L_id, frag_R_id]
# above threshold so keep anyway
else:
if '[1' in self.refsmi_dict[ctx1_id]:
self.largest_mcs_mmp_result[(molid_L, molid_R)] = \
[ctx_natoms, (ctx1_id, ctx2_id), frag_L_id, frag_R_id]
else:
if '[1' in self.refsmi_dict[ctx1_id]:
self.largest_mcs_mmp_result[(molid_L, molid_R)] = \
[ctx_natoms, (ctx1_id, ctx2_id), frag_L_id, frag_R_id]
# tie-break
elif total_num_atoms_new == total_num_atoms_existing:
# single always wins over double, so only consider this if existing is double
# double cut tie breaks get disambiguated later using custom function
if len(num_atoms_existing) == 1:
continue
else:
# consider the size of the 'smallest fragment' and add if same, replace if bigger,
# drop if smaller
if min(ctx_natoms) > min(num_atoms_existing):
if '[1' in self.refsmi_dict[ctx1_id]:
self.largest_mcs_mmp_result[(molid_L, molid_R)] = \
[ctx_natoms, (ctx1_id, ctx2_id), frag_L_id, frag_R_id]
elif min(ctx_natoms) == min(num_atoms_existing):
self.largest_mcs_mmp_result[(molid_L, molid_R)].extend(
[ctx_natoms, (ctx1_id, ctx2_id), frag_L_id, frag_R_id])
else:
# don't store as we have a better context with a larger 'smallest fragment'
continue
# double cut context must be smaller than what we already have so discard this new one
else:
continue
else:
# new result, case where we only have a double cut MCSS so add it!
if '[1' in self.refsmi_dict[ctx1_id]:
self.largest_mcs_mmp_result[(molid_L, molid_R)] = [ctx_natoms, (ctx1_id, ctx2_id),
frag_L_id, frag_R_id]
with open(out_file, "w") as final_out:
final_out.write('CUT_TYPE,MOL_ID_L,MOL_ID_R,NATOMS,MCSS,FRAG_L,FRAG_R\n')
# do single cut first as these take precedence above a double
for (molid_L, molid_R) in self.largest_mcs_mmp_result:
list_length = len(self.largest_mcs_mmp_result[(molid_L, molid_R)])
# the list self.largest_mcs_mmp_result[(molid_L, molid_R)] contains an ordered list of items
# the first 4 are (1) a tuple of the num_atoms (2) fragment (3&4) context in two parts
# Therefore if the list is greater than 8 items it means we have more than one double
# cut that we need to consider, possibly as a double cut tie break. We do not consider the
# case where there are 8 items as we know this will be two identical fragmentation patterns
# with differing isomeric numbering on the atom attachment points therefore we use >8 not >=8
if list_length > 8:
if len(self.largest_mcs_mmp_result[(molid_L, molid_R)][0]) == 1:
# disambiguate single cut list
final_out.write(process_mcss_list_to_string('SINGLE', self.largest_mcs_mmp_result[
(molid_L, molid_R)][0:4]))
else:
# print("Double won (a): ", molid_L, molid_R, self.largest_mcs_mmp_result[(molid_L, molid_R)])
new_list = disambiguate_double_list(self.largest_mcs_mmp_result[(molid_L, molid_R)])
final_out.write(process_mcss_list_to_string('DOUBLE', new_list))
elif list_length == 4:
# print("Single won (a): ", molid_L, molid_R, self.largest_mcs_mmp_result[(molid_L, molid_R)])
final_out.write(process_mcss_list_to_string('SINGLE', self.largest_mcs_mmp_result[
(molid_L, molid_R)]))
else:
# print("Double wins (b): ", molid_L, molid_R, self.largest_mcs_mmp_result[(molid_L, molid_R)])
# need to remove atom numbering dupes then print
new_list = remove_atom_num_dupes(self.largest_mcs_mmp_result[(molid_L, molid_R)])
final_out.write(process_mcss_list_to_string('DOUBLE', new_list))
class _TestMMPbasedMCSSObjectClass(unittest.TestCase):
"""Test class for MMPDataObjectClass(object) written to use pythons unittest
Example usage:
python mmp_mcss_objects.py
coverage run mmp_mcss_objects.py
coverage report mmp_mcss_objects.py
"""
def setUp(self):
    """Instantiate temp file names, test data objects that get written to temp files
    a silent logger object (needed to instantiate class) and the mmp object we'll test"""
    # show full diffs on assertEqual failures (golden dicts are large)
    self.maxDiff = None
    # setup test data location use tempfile.NamedTemporaryFile(delete=False) to persist data on disk
    self.temp_file_input_smi_01 = tempfile.NamedTemporaryFile(delete=False, suffix=".smi",
                                                              encoding='utf-8', mode='wt')
    self.temp_file_input_smi_03 = tempfile.NamedTemporaryFile(delete=False, suffix=".smi",
                                                              encoding='utf-8', mode='wt')
    self.temp_file_output_pairs = tempfile.NamedTemporaryFile(delete=False)
    # setup a logger object
    self.mmplogger = logging.getLogger('mmpobjectclass_testlogger')
    # logging.disable(logging.CRITICAL)
    # create empty mmp object
    self.test_mmp_mcss_object = MMPbasedMCSSObjectClass(self.mmplogger)
    # data set for use in testing input (mol id -> SMILES)
    self.test_dataset_goldeninput_smi_01 = {
        # The following represent synthetic data, analogues of CHEMBL1382609
        # https://www.ebi.ac.uk/chembl/compound_report_card/CHEMBL1382609/
        # 1. substituents are added to the pyrazole ring to generate side chain MMPs
        #    H on CHEMBL1382609 between two methyls is changed to Br, F, C, I to
        #    visually see the change in the smiles string (avoiding Cl as already present)
        #    e.g.: N1C(=C(Br)C(=N1)C)C
        # 2. core ring system is modified (phenyl to pyridine) to see ring switch MMP's
        #    Presence/Absence of Pyridine-N and N-positional isomerism in Cl-Ph ring
        #    e.g.: C2=NC(=CS2)C2=CC=C(Cl)C=C2 + addition of N ->
        #    C2=NC(=CS2)C2=CN=C(Cl)C=C2 + move N around ring ->
        #    C2=NC(=CS2)C2=NC=C(Cl)C=C2
        # for 1,2 single wins
        '001': 'N1(C2=NC(=CS2)C2=CC=C(Cl)C=C2)C(=C(Br)C(=N1)C)C',
        '002': 'N1(C2=NC(=CS2)C2=CC=C(Cl)C=C2)C(=C(F)C(=N1)C)C',
        # for 2,5 double wins tie
        '003': 'N1(C2=NC(=CS2)C2=CN=C(Cl)C=C2)C(=C(F)C(=N1)C)C',
        # The following represent synthetic data, analogues of CHEMBL1341352
        # for 1341352 and it's synthetic unsubstituted analogue there is no double
        # https://www.ebi.ac.uk/chembl/compound_report_card/CHEMBL1341352/
        '1341352': 'Cc1cc(nn1CC(=O)NCc2ccccc2)C(F)(F)F',
        '004': 'c1cc(nn1CC(=O)NCc2ccccc2)',
        # more double cut only
        # https://www.ebi.ac.uk/chembl/compound_report_card/CHEMBL6211
        # https://www.ebi.ac.uk/chembl/compound_report_card/CHEMBL6232
        '6211': 'O=C(OCC1N(C(=O)c2cc(c(OC)c(c2)OC)OC)CCN(C1)C(=O)c1cc(c(OC)c(OC)c1)OC)CCCCCCC',
        '6232': 'O=C(N1C(CN(C(=O)c2cc(c(OC)c(c2)OC)OC)CC1)COC(=O)CC(C)(C)C)c1cc(c(OC)c(OC)c1)OC'
    }
    self.test_dataset_goldeninput_smi_03 = {
        # repeat of above
        '001': 'N1(C2=NC(=CS2)C2=CC=C(Cl)C=C2)C(=C(Br)C(=N1)C)C',
        '002': 'N1(C2=NC(=CS2)C2=CC=C(Cl)C=C2)C(=C(F)C(=N1)C)C',
    }
    # golden outputs: expected CSV rows stored as dict keys (values unused)
    # all smiles are output from above input as either a repeat smiles or a fragment of them
    self.test_dataset_golden_output_01 = {'CUT_TYPE,MOL_ID_L,MOL_ID_R,NATOMS,MCSS,FRAG_L,FRAG_R': None,
                                          'SINGLE,1,2,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1BrH],[1FH]': None,
                                          'SINGLE,2,1,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1FH],[1BrH]': None,
                                          'DOUBLE,2,3,14,[1ClH].Fc1c([n](c2sc[2cH][n]2)[n]c1C)C,[1cH]1cc[2cH]cc1,[n]1[1cH]cc[2cH]c1': None,
                                          'DOUBLE,3,2,14,[1ClH].Fc1c([n](c2sc[2cH][n]2)[n]c1C)C,[n]1[1cH]cc[2cH]c1,[1cH]1cc[2cH]cc1': None,
                                          'SINGLE,1341352,4,11,O=C(NCc1ccccc1)[1CH3],Cc1[1nH][n]c(C(F)(F)F)c1,[1nH]1[n]ccc1': None,
                                          'SINGLE,4,1341352,11,O=C(NCc1ccccc1)[1CH3],[1nH]1[n]ccc1,Cc1[1nH][n]c(C(F)(F)F)c1': None,
                                          'DOUBLE,6211,6232,40,[1CH4].[2CH3]C(=O)OCC1N(C(=O)c2cc(c(OC)c(c2)OC)OC)CCN(C1)C(=O)c1cc(c(OC)c(OC)c1)OC,[1CH3]CCC[2CH3],C[12CH2]C': None,
                                          'DOUBLE,6232,6211,40,[1CH4].[2CH3]C(=O)OCC1N(C(=O)c2cc(c(OC)c(c2)OC)OC)CCN(C1)C(=O)c1cc(c(OC)c(OC)c1)OC,C[12CH2]C,[1CH3]CCC[2CH3]': None}
    self.test_dataset_golden_output_02 = {'CUT_TYPE,MOL_ID_L,MOL_ID_R,NATOMS,MCSS,FRAG_L,FRAG_R': None,
                                          'SINGLE,1,2,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1BrH],[1FH]': None,
                                          'SINGLE,2,1,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1FH],[1BrH]': None,
                                          'SINGLE,2,3,13,Fc1c([n](c2sc[1cH][n]2)[n]c1C)C,Clc1cc[1cH]cc1,Clc1[n]c[1cH]cc1': None,
                                          'SINGLE,3,2,13,Fc1c([n](c2sc[1cH][n]2)[n]c1C)C,Clc1[n]c[1cH]cc1,Clc1cc[1cH]cc1': None,
                                          'SINGLE,1341352,4,11,O=C(NCc1ccccc1)[1CH3],Cc1[1nH][n]c(C(F)(F)F)c1,[1nH]1[n]ccc1': None,
                                          'SINGLE,4,1341352,11,O=C(NCc1ccccc1)[1CH3],[1nH]1[n]ccc1,Cc1[1nH][n]c(C(F)(F)F)c1': None,
                                          'SINGLE,6211,6232,39,[1CH3]C(=O)OCC1N(C(=O)c2cc(c(OC)c(c2)OC)OC)CCN(C1)C(=O)c1cc(c(OC)c(OC)c1)OC,[1CH3]CCCCC,C[1CH](C)C': None,
                                          'SINGLE,6232,6211,39,[1CH3]C(=O)OCC1N(C(=O)c2cc(c(OC)c(c2)OC)OC)CCN(C1)C(=O)c1cc(c(OC)c(OC)c1)OC,C[1CH](C)C,[1CH3]CCCCC': None}
    self.test_dataset_golden_output_03 = {'CUT_TYPE,MOL_ID_L,MOL_ID_R,NATOMS,MCSS,FRAG_L,FRAG_R': None,
                                          'SINGLE,1,2,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1BrH],[1FH]': None,
                                          'SINGLE,2,1,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1FH],[1BrH]': None,
                                          'DOUBLE,1,2,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1BrH],[1FH]': None,
                                          'DOUBLE,2,1,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1FH],[1BrH]': None}
    # write test data to temp file (smi)
    for smi_id, smi in list(self.test_dataset_goldeninput_smi_01.items()):
        self.temp_file_input_smi_01.write(smi + " " + smi_id + "\n")
    self.temp_file_input_smi_01.close()
    # write test data to temp file (smi)
    for smi_id, smi in list(self.test_dataset_goldeninput_smi_03.items()):
        self.temp_file_input_smi_03.write(smi + " " + smi_id + "\n")
    self.temp_file_input_smi_03.close()
    # container for results data
    self.test_dataset_testresults = {}
def tearDown(self):
    """Tear down object for clean reuse in further tests."""
    # clean out the object
    self.test_mmp_mcss_object.clean_out_data()
    # clean out the temp data store
    self.test_dataset_testresults.clear()
    # BUG FIX: setUp creates three delete=False temp files but the original
    # only removed temp_file_input_smi_01, leaking the other two on disk.
    os.remove(self.temp_file_input_smi_01.name)
    os.remove(self.temp_file_input_smi_03.name)
    # close the still-open output handle before unlinking (required on Windows)
    self.temp_file_output_pairs.close()
    os.remove(self.temp_file_output_pairs.name)
def test_get_largest_mcs_pairs_with_diff(self):
    """Test method to get largest MCS MMP for given smi - smi pair"""
    # full build, then write the pairs for all columns to the temp file
    self.test_mmp_mcss_object.build_from_dicer(self.temp_file_input_smi_01.name, 'BOTH', 'NONE')
    self.test_mmp_mcss_object.enumerate_fragment_properties()
    self.test_mmp_mcss_object.get_largest_mcs_pairs(self.temp_file_output_pairs.name, 'BOTH')
    # read the results back and compare against the golden data
    with open(self.temp_file_output_pairs.name, 'r') as results_fh:
        for row in results_fh:
            row = row.rstrip('\r')
            row = row.rstrip('\n')
            self.test_dataset_testresults[row] = None
    #print(self.test_dataset_testresults)
    self.assertEqual(self.test_dataset_golden_output_01, self.test_dataset_testresults)
def test_get_largest_mcs_pairs_mdc_atm_hard(self):
    """Test method to get largest MCS MMP for given smi - smi pair"""
    # full build, then write the pairs with a hard double-cut atom limit
    self.test_mmp_mcss_object.build_from_dicer(self.temp_file_input_smi_01.name, 'BOTH', 'NONE')
    self.test_mmp_mcss_object.enumerate_fragment_properties()
    self.test_mmp_mcss_object.get_largest_mcs_pairs(self.temp_file_output_pairs.name, 'BOTH', mdc_atm_hard=4)
    # read the results back and compare against the golden data
    with open(self.temp_file_output_pairs.name, 'r') as results_fh:
        for row in results_fh:
            row = row.rstrip('\r')
            row = row.rstrip('\n')
            self.test_dataset_testresults[row] = None
    #print(self.test_dataset_testresults)
    self.assertEqual(self.test_dataset_golden_output_02, self.test_dataset_testresults)
def test_get_largest_mcs_pairs_mdc_atm_soft(self):
| |
# repo: cdmacfadyen/MedPicPy
"""medpicpy's higher level functions to abstract over reading
in medical imaging data
"""
import glob
from pathlib import Path
from os.path import normpath
import pandas as pd
import numpy as np
import cv2
import logging
from . import io
from .utils import remove_sub_paths
from . import config
# Module-level logger. The original called logging.getLogger(__name__) and
# discarded the return value (a no-op beyond implicitly creating the logger);
# binding it makes the logger actually usable by this module.
logger = logging.getLogger(__name__)
def load_images_from_csv(dataframe,
        image_name_column,
        image_dir_path,
        output_shape,
        use_memory_mapping=False):
    """Read an array of images whose file names come from a csv column.

    ##Example
    ```python
    import medpicpy as med
    import pandas as pd

    description = pd.read_csv("data.csv")
    array = med.load_images_from_csv(description, 0, "mini-MIAS/", (224, 224))
    ```

    Args:
        dataframe (pandas.DataFrame): A pandas dataframe from the csv
        image_name_column (index): Index of column with image names
        image_dir_path (string): Path to directory containing images
        output_shape (tuple): Output shape for each image
        use_memory_mapping (optional, boolean): store the data on disk instead of in memory.
            Defaults to False

    Returns:
        np.Array: Array of images in order
    """
    # Join the directory onto every name and normalise in a single pass.
    names = dataframe[image_name_column]
    paths = names.apply(lambda name: normpath(image_dir_path + "/" + name))
    return load_images_from_paths(paths, output_shape, use_memory_mapping=use_memory_mapping)
def load_bounding_boxes_from_csv(
dataframe,
centre_x_column,
centre_y_column,
width_column,
height_column,
x_scale_factor=1,
y_scale_factor=1
): # for bounding boxes need to know if measurements are in pixels or mm
"""Read bounding boxes from dataframe of csv
##Example
```python
import medpicpy as med
import pandas as pd
description = pd.read_csv("data.csv")
# x and y scale factor are new_image_size / original_image_size
# only set if the images were resized when being loaded in
x_scale_factor = 224 / 1024
y_scale_factor = 224 / 1024
xs, ys, widths, heights = med.load_bounding_boxes_from_csv(
description,
4,
5,
6,
6,
x_scale_factor=x_scale_factor,
y_scale_factor=y_scale_factor
)
```
Args:
dataframe (pandas.DataFrame): Dataframe of csv
centre_x_column (index): Index of column for x anchor or box
centre_y_column (index): Index of column for y anchor of box
width_column (index): Index of column for width of box
height_column (index): Index of column for heigh of box.
Can be same as width column for squares or circles.
x_scale_factor (int, optional): Factor to rescale by if image was reshaped. Defaults to 1.
y_scale_factor (int, optional): Factor to rescale by if image was reshaped. Defaults to 1.
Returns:
tuple: 4 tuple of np.Arrays with x, y, widths and heights
"""
bbox_xs = dataframe[centre_x_column]
bbox_xs = bbox_xs.multiply(x_scale_factor)
xs_array = bbox_xs.to_numpy(dtype=np.float16)
bbox_ys = dataframe[centre_y_column]
bbox_ys = bbox_ys.multiply(y_scale_factor)
ys_array = bbox_ys.to_numpy(dtype=np.float16)
bbox_widths = dataframe[width_column]
bbox_widths = bbox_widths.multiply(x_scale_factor)
widths_array = bbox_widths.to_numpy(dtype=np.float16)
bbox_heights = dataframe[height_column]
bbox_heights = bbox_heights.multiply(y_scale_factor)
heights_array = bbox_heights.to_numpy(dtype=np.float16)
array_tuple = (xs_array, ys_array, widths_array, heights_array)
return array_tuple
# To read datasets where the class name is in the directory structure.
# i.e. covid/im001 or no-covid/im001
# pulls the class names from the path and reads in the images
# as a numpy array
# TODO: make this work for 3D images, either make a new function or
# add optional args (would be slice axis and slices to take)
def load_classes_in_directory_name(directory,
        image_extension,
        output_shape,
        class_level=1,
        slices_to_take=None,
        slice_axis=-2,
        use_memory_mapping=False):
    """Parse datasets where the class name is one of the directory names.

    ## Example
    If dataset has directory structure:
    ```
    dataset/
        benign/
            im001.dcm
            im002.dcm
        malignant/
            im001.dcm
            im002.dcm
    ```
    then:
    ```python
    import medpicpy as med

    classes, images = med.load_classes_in_directory_name(
        "dataset/",
        ".dcm",
        "(128, 128)"
    )
    print(classes)
    # ["benign", "benign", "malignant", "malignant"]
    print(images.shape)
    # (4, 128, 128)
    ```

    Args:
        directory (path): root directory of dataset
        image_extension (str): Wildcard for identifying images,
            e.g for png's - *.png
        output_shape (tuple): Desired output shape of images
        class_level (int, optional): Which level of directory structure
            contains class name. Defaults to 1.
        slices_to_take: currently unused; reserved for 3D support (see TODO
            above this function).
        slice_axis: currently unused; reserved for 3D support.
        use_memory_mapping (optional, boolean): store the data on disk instead of in memory.
            Defaults to False

    Returns:
        list(str), np.Array : list of classes and corresponding images with correct shape
    """
    # Recursively collect every matching image path under the root.
    pattern = directory + "/**/*" + image_extension
    matches = remove_sub_paths(glob.glob(pattern, recursive=True))
    count = len(matches)
    images = io.allocate_array((count,) + output_shape, use_memory_mapping=use_memory_mapping)
    labels = np.empty(count, dtype=object)
    for position, file_path in enumerate(matches):
        # The class label is simply a path component at class_level.
        labels[position] = Path(file_path).parts[class_level]
        loaded = io.load_image(file_path, use_memory_mapping=use_memory_mapping)
        images[position] = cv2.resize(loaded, output_shape)
    return labels, images
def load_images_from_paths(paths, output_shape, use_memory_mapping=False):
    """2D image loading function that takes an array of
    paths and an output shape and returns the images in
    the same order as the paths. Requires every
    path to have an image and every image to be resizeable
    to the given output shape.

    For higher dimension images use load_series_from_paths.

    Args:
        paths (list or array-like): paths of images to load
        output_shape (tuple): desired shape of each image
        use_memory_mapping (optional, boolean): store the data on disk instead of in memory.
            Defaults to False

    Returns:
        np.array: all images in numpy format with given shape
    """
    # Hoist len() out of the loop; it was re-evaluated on every iteration
    # for the progress message.
    array_length = len(paths)
    array_shape = (array_length,) + output_shape  # concat tuples to get shape
    image_array = io.allocate_array(array_shape, use_memory_mapping=use_memory_mapping)
    # enumerate instead of range(len(...)) + positional indexing; this also
    # works for any iterable-with-len, not just positionally-indexable ones.
    for i, image_name in enumerate(paths):
        print("Loading images {} / {}".format(i + 1, array_length), end="\r", flush=True)
        image = io.load_image(image_name, use_memory_mapping=use_memory_mapping)
        image_array[i] = cv2.resize(image, output_shape)
    print("")
    return image_array
# slice axis will be -2 for most things since they
# are 1 channel, for colour images would probably be -3
# But I don't think you get colour 3D scans
# It would work for multimodal things stacked on top of each other though
def load_series_from_paths(
        paths,
        slice_output_shape,
        slices_to_take,
        slice_axis=-2,
        use_memory_mapping=False
):
    """Load an array of 3D scans into memory from their paths.

    Useful for e.g. CT or MR scans. Takes a list of paths, the output shape
    for each 2D slice and a list containing which slices
    to take from each image. To take the first 60 slices
    pass range(0, 60).

    The output shape should be a tuple of (int, int).
    Optionally take which axis to reshape the image along.
    For any scans with one channel (grayscale) slices this should
    be -2, if there is a colour channel (or its some kind
    of multimodal stack) then the axis would be -3.

    ## Example
    ```python
    import medpicpy as med

    paths = ["data/ID-001/CT/prone.nii.gz", "data/ID-002/CT/prone.nii.gz"]
    slices_to_take = range(60, 120)
    output_slice_shape = (128, 128)  # desired shape of each slice in the scan
    images = med.load_series_from_paths(
        paths,
        output_slice_shape,
        slices_to_take
    )
    print(images.shape)
    # (2, 60, 128, 128)
    ```

    Args:
        paths (list): list of paths to the scans to load
        slice_output_shape (tuple): shape each slice should be resized to
        slices_to_take (list): list of indices of slices to take
        slice_axis (int, optional): axis to resize along. Defaults to -2.
        use_memory_mapping (optional, boolean): store the data on disk instead of in memory.
            Defaults to False

    Returns:
        np.array: array of all scans with specified size
    """
    output_shape = (len(paths), len(slices_to_take)) + slice_output_shape
    output_array = io.allocate_array(output_shape, use_memory_mapping=use_memory_mapping)
    for i in range(len(paths)):
        print("Loading images {} / {}".format(i + 1, len(paths)), end="\r", flush=True)
        path = paths[i]
        # NOTE(review): the intermediates below always use
        # use_memory_mapping=False -- presumably deliberate so that only the
        # final arrays live on disk; confirm.
        image = io.load_image(path, use_memory_mapping=False)
        new_image = io.allocate_array(((len(slices_to_take),) + image[0].shape), use_memory_mapping=False)
        for index, slice_index in enumerate(slices_to_take):
            new_image[index] = image[slice_index]
        # Replace the two spatial dims at slice_axis with the requested
        # slice shape, keeping any dims after them (e.g. a channel dim).
        # BUG FIX: normalise the negative axis first. The original used
        # new_image.shape[:slice_axis + 2] for the tail, which is a prefix of
        # the shape -- it only produced the right (empty) tail for the default
        # slice_axis=-2, and was wrong for slice_axis=-3.
        axis = slice_axis % len(new_image.shape)
        final_shape = (new_image.shape[:axis]
                       + slice_output_shape
                       + new_image.shape[axis + 2:])
        final_image = io.allocate_array(final_shape, use_memory_mapping=use_memory_mapping)
        for j in range(final_shape[0]):
            # BUG FIX: was cv2.resize(new_image[j][slice_axis], ...), which
            # indexed out a single 1-D row instead of resizing the whole
            # 2-D slice.
            final_image[j] = cv2.resize(new_image[j], slice_output_shape)
        output_array[i] = final_image
    print("")
    return output_array
def get_length_of_all_series(paths):
"""Find the number of 2D slices
in a list of images. These images can
be 2D, 3D, or a mixture of both. Also
returns the paths that each slice comes from,
e.g. if an image contains 250 slices,
then that path will be duplicated 250 times
in the array so the original scan is known.
Args:
paths (list(str)): paths to images
Returns:
int, list(str): | |
'Yulin, Guangxi', 'zh': u('\u5e7f\u897f\u7389\u6797\u5e02')},
'861777614':{'en': 'Yulin, Guangxi', 'zh': u('\u5e7f\u897f\u7389\u6797\u5e02')},
'861777615':{'en': 'Yulin, Guangxi', 'zh': u('\u5e7f\u897f\u7389\u6797\u5e02')},
'861777616':{'en': 'Baise, Guangxi', 'zh': u('\u5e7f\u897f\u767e\u8272\u5e02')},
'861777617':{'en': 'Baise, Guangxi', 'zh': u('\u5e7f\u897f\u767e\u8272\u5e02')},
'861811884':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861813902':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861811885':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861811886':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861813639':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861768948':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861811887':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861779110':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u5b9d\u9e21\u5e02')},
'861811880':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861811881':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'86181187':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'86181185':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861811882':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'86181181':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861811883':{'en': 'Nanjing, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861810317':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861779119':{'en': 'XiAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'861779118':{'en': 'Shangluo, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5546\u6d1b\u5e02')},
'861810316':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')},
'861809564':{'en': 'LiuAn, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'86181239':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'86181238':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'86181235':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'86181237':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'86181236':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'86181233':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861810315':{'en': 'Tang<NAME>', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')},
'861809565':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861804190':{'en': 'Liaoyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u8fbd\u9633\u5e02')},
'861804191':{'en': 'Liaoyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u8fbd\u9633\u5e02')},
'861804192':{'en': 'Liaoyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u8fbd\u9633\u5e02')},
'861804196':{'en': 'Huludao, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u846b\u82a6\u5c9b\u5e02')},
'861804197':{'en': 'Huludao, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u846b\u82a6\u5c9b\u5e02')},
'861804198':{'en': 'Huludao, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u846b\u82a6\u5c9b\u5e02')},
'861804199':{'en': 'Huludao, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u846b\u82a6\u5c9b\u5e02')},
'861810314':{'en': 'Chengde, Hebei', 'zh': u('\u6cb3\u5317\u7701\u627f\u5fb7\u5e02')},
'861800752':{'en': 'Huizhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u60e0\u5dde\u5e02')},
'861800753':{'en': 'Meizhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')},
'861800750':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6c5f\u95e8\u5e02')},
'861800751':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'861800756':{'en': 'Zhuhai, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u73e0\u6d77\u5e02')},
'861800757':{'en': 'Foshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861800754':{'en': 'Shantou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6c55\u5934\u5e02')},
'861800755':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861800758':{'en': 'Zhaoqing, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8087\u5e86\u5e02')},
'861800759':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6e5b\u6c5f\u5e02')},
'861808507':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'861808506':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u94dc\u4ec1\u5730\u533a')},
'861808505':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u4e1c\u5357\u82d7\u65cf\u4f97\u65cf\u81ea\u6cbb\u5dde')},
'861808504':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861808503':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'861808502':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'861808501':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'861808500':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'861810313':{'en': '<NAME>', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')},
'861808509':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'861808508':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'861812073':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u53a6\u95e8\u5e02')},
'86180898':{'en': '<NAME>', 'zh': u('\u6d77\u5357\u7701\u6d77\u53e3\u5e02')},
'861812444':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861811400':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861811401':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861810312':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')},
'861809441':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861811404':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861812442':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6e5b\u6c5f\u5e02')},
'861811405':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861810311':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'861811407':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861770538':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6cf0\u5b89\u5e02')},
'861770539':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u4e34\u6c82\u5e02')},
'861812448':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6cb3\u6e90\u5e02')},
'861810310':{'en': '<NAME>', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')},
'861801485':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861770530':{'en': 'Heze, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u83cf\u6cfd\u5e02')},
'861801487':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861801486':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861801481':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861801480':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861801483':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861801482':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861801489':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861801488':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861770533':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u6dc4\u535a\u5e02')},
'861806109':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861806108':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861806105':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861806104':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861806107':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861806106':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861806101':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861806100':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861806103':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861806102':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861770536':{'en': 'Weifang, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6f4d\u574a\u5e02')},
'861770537':{'en': 'Jining, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u6d4e\u5b81\u5e02')},
'86180462':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u53a6\u95e8\u5e02')},
'86180460':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'86180465':{'en': 'Beijing', 'zh': u('\u5317\u4eac\u5e02')},
'861802599':{'en': 'Foshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861802596':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861802597':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'86180469':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'861802595':{'en': 'Foshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861802592':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'861802593':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'861802590':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'861802591':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'861778196':{'en': 'Dazhou, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u8fbe\u5dde\u5e02')},
'861778197':{'en': 'Mianyang, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u7ef5\u9633\u5e02')},
'861778194':{'en': 'Dazhou, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u8fbe\u5dde\u5e02')},
'861778195':{'en': 'Dazhou, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u8fbe\u5dde\u5e02')},
'861778192':{'en': 'Bazhong, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5df4\u4e2d\u5e02')},
'861778193':{'en': 'Dazhou, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u8fbe\u5dde\u5e02')},
'861778190':{'en': 'Guangyuan, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5e7f\u5143\u5e02')},
'861778191':{'en': 'Bazhong, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5df4\u4e2d\u5e02')},
'861770475':{'en': 'Tongliao, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u901a\u8fbd\u5e02')},
'861770474':{'en': 'Ulanqab, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u5170\u5bdf\u5e03\u5e02')},
'861770477':{'en': 'Ordos, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u9102\u5c14\u591a\u65af\u5e02')},
'861770476':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')},
'861770471':{'en': 'Hohhot, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u547c\u548c\u6d69\u7279\u5e02')},
'861770470':{'en': 'Hulun, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u547c\u4f26\u8d1d\u5c14\u5e02')},
'861778198':{'en': 'Mianyang, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u7ef5\u9633\u5e02')},
'861778199':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u7ef5\u9633\u5e02')},
'861808374':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861809447':{'en': 'Taizhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'86181196':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'86180607':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'86180606':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'86180605':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'86180603':{'en': 'Ningde, Fujian', 'zh': u('\u798f\u5efa\u7701\u5b81\u5fb7\u5e02')},
'86180602':{'en': 'Zhangzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6f33\u5dde\u5e02')},
'86180600':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'86180609':{'en': 'Xiamen, Fujian', 'zh': u('\u798f\u5efa\u7701\u53a6\u95e8\u5e02')},
'86180608':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861770691':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861770690':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861770693':{'en': 'Ningde, Fujian', 'zh': u('\u798f\u5efa\u7701\u5b81\u5fb7\u5e02')},
'861770692':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861770695':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861770694':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u8386\u7530\u5e02')},
'861770697':{'en': 'Long<NAME>ian', 'zh': u('\u798f\u5efa\u7701\u9f99\u5ca9\u5e02')},
'861770696':{'en': 'Zhangzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6f33\u5dde\u5e02')},
'861770699':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861770698':{'en': 'Sanming, Fujian', 'zh': u('\u798f\u5efa\u7701\u4e09\u660e\u5e02')},
'861810898':{'en': 'Lhasa, Tibet', 'zh': u('\u897f\u85cf\u62c9\u8428\u5e02')},
'861810899':{'en': 'Lhasa, Tibet', 'zh': u('\u897f\u85cf\u62c9\u8428\u5e02')},
'861808376':{'en': 'Yangzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'861800998':{'en': 'Kashi, Xinjiang', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861800999':{'en': 'Ili, Xinjiang', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861800994':{'en': 'Changji, Xinjiang', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861800995':{'en': 'Turpan, Xinjiang', 'zh': u('\u65b0\u7586\u5410\u9c81\u756a\u5730\u533a')},
'861800996':{'en': 'Bayingolin, Xinjiang', 'zh': u('\u65b0\u7586\u5df4\u97f3\u90ed\u695e\u8499\u53e4\u81ea\u6cbb\u5dde')},
'861800997':{'en': 'Aksu, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u514b\u82cf\u5730\u533a')},
'861800990':{'en': 'Karamay, Xinjiang', 'zh': u('\u65b0\u7586\u514b\u62c9\u739b\u4f9d\u5e02')},
'861800991':{'en': 'Urumchi, Xinjiang', 'zh': u('\u65b0\u7586\u4e4c\u9c81\u6728\u9f50\u5e02')},
'861800992':{'en': 'Ili, Xinjiang', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861800993':{'en': 'Shihezi, Xinjiang', 'zh': u('\u65b0\u7586\u77f3\u6cb3\u5b50\u5e02')},
'861809818':{'en': 'Foshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861809819':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861809812':{'en': 'Chaozhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6f6e\u5dde\u5e02')},
'861809813':{'en': 'Chaozhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6f6e\u5dde\u5e02')},
'861809810':{'en': 'Chaozhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6f6e\u5dde\u5e02')},
'861809811':{'en': 'Chaozhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6f6e\u5dde\u5e02')},
'861809816':{'en': 'Foshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861808377':{'en': 'Yangzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'861809814':{'en': 'Chaozhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6f6e\u5dde\u5e02')},
'861809815':{'en': 'Foshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'861762543':{'en': 'Nantong, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861808370':{'en': 'Taizhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861812258':{'en': 'Maoming, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861812259':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u60e0\u5dde\u5e02')},
'861809128':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'861808371':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'86180760':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u6bd5\u8282\u5730\u533a')},
'861812256':{'en': 'Maoming, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861812257':{'en': 'Maoming, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861813386':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')},
'861808372':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861812251':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6cb3\u6e90\u5e02')},
'861811430':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861808373':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861811433':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861811842':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861806920':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861809129':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'861811435':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861811558':{'en': 'Zhenjiang, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861806927':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861811434':{'en': 'Suqian, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861803418':{'en': 'Hengshui, Hebei', 'zh': u('\u6cb3\u5317\u7701\u8861\u6c34\u5e02')},
'861803419':{'en': 'Xingtai, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90a2\u53f0\u5e02')},
'861803414':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'861803415':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')},
'861803416':{'en': 'Langfang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')},
'861803417':{'en': 'Cangzhou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u6ca7\u5dde\u5e02')},
'861803410':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')},
'861803411':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'861803412':{'en': 'Baoding, Hebei', 'zh': u('\u6cb3\u5317\u7701\u4fdd\u5b9a\u5e02')},
'861803413':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')},
'861770996':{'en': 'Bayingolin, Xinjiang', 'zh': u('\u65b0\u7586\u5df4\u97f3\u90ed\u695e\u8499\u53e4\u81ea\u6cbb\u5dde')},
'861770997':{'en': 'Aksu, Xinjiang', 'zh': u('\u65b0\u7586\u963f\u514b\u82cf\u5730\u533a')},
'861770994':{'en': 'Changji, Xinjiang', 'zh': u('\u65b0\u7586\u660c\u5409\u56de\u65cf\u81ea\u6cbb\u5dde')},
'861770995':{'en': 'Turpan, Xinjiang', 'zh': u('\u65b0\u7586\u5410\u9c81\u756a\u5730\u533a')},
'861770992':{'en': 'Ili, Xinjiang', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861770993':{'en': 'Shihezi, Xinjiang', 'zh': u('\u65b0\u7586\u77f3\u6cb3\u5b50\u5e02')},
'861770990':{'en': 'Karamay, Xinjiang', 'zh': u('\u65b0\u7586\u514b\u62c9\u739b\u4f9d\u5e02')},
'861770991':{'en': 'Urumchi, Xinjiang', 'zh': u('\u65b0\u7586\u4e4c\u9c81\u6728\u9f50\u5e02')},
'861770998':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u5580\u4ec0\u5730\u533a')},
'861770999':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u4f0a\u7281\u54c8\u8428\u514b\u81ea\u6cbb\u5dde')},
'861808770':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u7389\u6eaa\u5e02')},
'861808771':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u7389\u6eaa\u5e02')},
'861808772':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u666e\u6d31\u5e02')},
'861808773':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u666e\u6d31\u5e02')},
'861808312':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861808775':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861808776':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861808777':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u7389\u6eaa\u5e02')},
'861808778':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u7389\u6eaa\u5e02')},
'861808779':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u7389\u6eaa\u5e02')},
'861810011':{'en': 'Tianjin', 'zh': u('\u5929\u6d25\u5e02')},
'861808998':{'en': 'Lhasa, Tibet', 'zh': u('\u897f\u85cf\u62c9\u8428\u5e02')},
'861808999':{'en': 'Lhasa, Tibet', 'zh': u('\u897f\u85cf\u62c9\u8428\u5e02')},
'861811436':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861772228':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861772229':{'en': 'Maoming, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861772226':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861772227':{'en': 'Zhanjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e5b\u6c5f\u5e02')},
'861772224':{'en': 'Zhaoqing, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8087\u5e86\u5e02')},
'861772225':{'en': 'Jieyang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u63ed\u9633\u5e02')},
'861772222':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861772223':{'en': 'Heyuan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6cb3\u6e90\u5e02')},
'861772220':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861772221':{'en': 'Chaozhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6f6e\u5dde\u5e02')},
'861808313':{'en': 'Qiannan, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861810012':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861810089':{'en': 'Kunming, Yunnan', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'86180852':{'en': 'Zunyi, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u9075\u4e49\u5e02')},
'86180853':{'en': 'Anshun, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u5b89\u987a\u5e02')},
'86180851':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u8d35\u9633\u5e02')},
'86180856':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u94dc\u4ec1\u5730\u533a')},
'86180857':{'en': 'Bijie, Guizhou', 'zh': u('\u8d35\u5dde\u7701\u6bd5\u8282\u5730\u533a')},
'86180854':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'86180855':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u4e1c\u5357\u82d7\u65cf\u4f97\u65cf\u81ea\u6cbb\u5dde')},
'861807901':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u65b0\u4f59\u5e02')},
'86180858':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u516d\u76d8\u6c34\u5e02')},
'86180859':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u897f\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861804879':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u9042\u5b81\u5e02')},
'861804878':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u7709\u5c71\u5e02')},
'861804877':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5b9c\u5bbe\u5e02')},
'861804876':{'en': '<NAME>uan', 'zh': u('\u56db\u5ddd\u7701\u5b9c\u5bbe\u5e02')},
'861804875':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u51c9\u5c71\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861804874':{'en': 'Liangshan, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u51c9\u5c71\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861804873':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u8fbe\u5dde\u5e02')},
'861804872':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u8fbe\u5dde\u5e02')},
'861804871':{'en': 'Ne<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5185\u6c5f\u5e02')},
'861804870':{'en': 'Neijiang, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5185\u6c5f\u5e02')},
'861800149':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861800148':{'en': 'Nantong, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861800147':{'en': 'Nantong, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861800146':{'en': 'Nantong, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861800145':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'861800144':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'861800143':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861800142':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861800141':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861800140':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861802127':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861802126':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861802125':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861802124':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861802123':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861802122':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861802121':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861802120':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'8617621':{'en': 'Shanghai', 'zh': u('\u4e0a\u6d77\u5e02')},
| |
# repo: ndessart/olympe
# -*- coding: UTF-8 -*-
# Copyright (C) 2019 Parrot Drones SAS
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the Parrot Company nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# PARROT COMPANY BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import unicode_literals
from aenum import IntFlag
import concurrent.futures
import ctypes
import logging
import olympe_deps as od
import os
import threading
try:
from itertools import ifilter as filter
except ImportError:
# python3
pass
# NOTE(review): this configures the "concurrent.futures" logger at import
# time (stream handler + DEBUG level). Configuring logging from a library
# module is unusual and very verbose — confirm this is intended.
logger = logging.getLogger("concurrent.futures")
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
class PompEvent(IntFlag):
    # Bitmask of libpomp fd event types; each member mirrors the
    # corresponding POMP_FD_EVENT_* constant from olympe_deps.
    IN = od.POMP_FD_EVENT_IN
    PRI = od.POMP_FD_EVENT_PRI
    OUT = od.POMP_FD_EVENT_OUT
    ERR = od.POMP_FD_EVENT_ERR
    HUP = od.POMP_FD_EVENT_HUP
class Future(concurrent.futures.Future):
    """
    A chainable Future class

    Extends concurrent.futures.Future with result propagation between
    futures (set_from/chain/then) and with registration on the owning pomp
    loop so pending futures can be tracked until they complete.
    """

    def __init__(self, loop):
        super(Future, self).__init__()
        # The PompLoopThread that owns (and tracks) this future.
        self._loop = loop

    def _register(self):
        # Track this future in the loop; automatically untrack once done.
        self._loop._register_future(id(self))
        self.add_done_callback(lambda _: self._loop._unregister_future(
            id(self), ignore_error=True))

    def __del__(self):
        self._loop._unregister_future(id(self), ignore_error=True)

    def set_from(self, source):
        """Copy the terminal state (cancellation, exception or result) of
        `source` into this future, unless this future is already done."""
        if source.cancelled():
            self.cancel()
            return
        if self.done():
            return
        if not self.set_running_or_notify_cancel():
            return
        exception = source.exception()
        if exception is not None:
            self.set_exception(exception)
        else:
            result = source.result()
            self.set_result(result)

    def chain(self, next_):
        """Forward this future's terminal state into `next_` once done."""
        self.add_done_callback(lambda _: next_.set_from(self))

    def then(self, fn, deferred=False):
        """Return a future of `fn` applied to this future's result.

        `fn` runs in the loop thread (deferred or async) unless the current
        thread already is the loop thread, in which case it runs inline.
        """
        result = Future(self._loop)
        result._register()

        def callback(_):
            try:
                if deferred:
                    temp = self._loop.run_later(fn, self.result())
                    temp.chain(result)
                elif threading.current_thread() is not self._loop:
                    temp = self._loop.run_async(fn, self.result())
                    temp.chain(result)
                else:
                    result.set_result(fn(self.result()))
            except Exception as e:
                # BUG fix: this used `self.logging.exception`, but Future has
                # no `logging` attribute (AttributeError would mask the real
                # error). Use the module-level logger instead.
                logger.exception(
                    "Unhandled exception while chaining futures"
                )
                result.set_exception(e)
            except BaseException:
                # Non-Exception (e.g. KeyboardInterrupt): cancel the chain.
                result.cancel()
        self.add_done_callback(callback)
        return result

    def result_or_cancel(self, timeout=None):
        """Like result(), but cancel this future if waiting fails."""
        try:
            return self.result(timeout=timeout)
        except BaseException:
            self.cancel()
            raise
class PompLoopThread(threading.Thread):
"""
Class running a pomp loop in a pomp thread.
It performs all calls to pomp and arsdk-ng within the loop (except init and destruction)
"""
def __init__(self, logging):
self.logging = logging
self.running = False
self.pomptimeout_ms = 100
self.async_pomp_task = list()
self.deferred_pomp_task = list()
self.wakeup_evt = od.pomp_evt_new()
self.pomp_events = dict()
self.pomp_event_callbacks = dict()
self.pomp_loop = None
self.pomp_timers = {}
self.pomp_timer_callbacks = {}
self.evt_userdata = dict()
self.fd_userdata = dict()
self.c_fd_userdata = dict()
self.c_evt_userdata = dict()
self.pomp_fd_callbacks = dict()
self.cleanup_functions = []
self.futures = []
self._create_pomp_loop()
super(PompLoopThread, self).__init__()
    def destroy(self):
        """Stop the thread and release every pomp resource: the wakeup
        event, all fds/events, all timers, and finally the loop itself."""
        # stop the thread
        self.stop()
        self._remove_event_from_loop(self.wakeup_evt)
        # remove all fds from the loop
        self._destroy_pomp_loop_fds()
        # remove all timers from the loop
        self._destroy_pomp_loop_timers()
        # destroy the loop
        self._destroy_pomp_loop()
def start(self):
self.running = True
super().start()
def stop(self):
"""
Stop thread to manage commands send to the drone
"""
if not self.running:
return False
self.running = False
if threading.current_thread().ident != self.ident:
self._wake_up()
self.join()
return True
def run_async(self, func, *args, **kwds):
"""
Fills in a list with the function to be executed in the pomp thread
and wakes up the pomp thread.
"""
future = Future(self)
future._register()
if threading.current_thread() is not self:
self.async_pomp_task.append((future, func, args, kwds))
self._wake_up()
else:
try:
ret = func(*args, **kwds)
except Exception as e:
self.logging.exception(
"Unhandled exception in async task function"
)
future.set_exception(e)
else:
if not isinstance(ret, concurrent.futures.Future):
future.set_result(ret)
else:
ret.chain(future)
return future
def run_later(self, func, *args, **kwds):
"""
Fills in a list with the function to be executed later in the pomp thread
"""
future = Future(self)
future._register()
self.deferred_pomp_task.append((future, func, args, kwds))
return future
    def _wake_up_event_cb(self, pomp_evt, _userdata):
        """
        Callback received when a pomp_evt is triggered.
        """
        # Intentionally a no-op: the event's only purpose is to make
        # pomp_loop_wait_and_process return promptly so queued tasks run.
        # the pomp_evt is acknowledged by libpomp
def _run_task_list(self, task_list):
"""
execute all pending functions located in the task list
this is done in the order the list has been filled in
"""
while len(task_list):
future, f, args, kwds = task_list.pop(0)
try:
ret = f(*args, **kwds)
except Exception as e:
self.logging.exception(
"Unhandled exception in async task function"
)
self._unregister_future(future, ignore_error=True)
future.set_exception(e)
continue
if not isinstance(ret, concurrent.futures.Future):
future.set_result(ret)
else:
ret.chain(future)
def run(self):
"""
Thread's main loop
"""
self._add_event_to_loop(
self.wakeup_evt, lambda *args: self._wake_up_event_cb(*args)
)
# We have to monitor the main thread exit. This is the simplest way to
# let the main thread handle the signals while still being able to
# perform some cleanup before the process exit. If we don't monitor the
# main thread, this thread will hang the process when the process
# receive SIGINT (or any other non fatal signal).
main_thread = next(
filter(lambda t: t.name == "MainThread", threading.enumerate())
)
try:
while self.running and main_thread.is_alive():
try:
self._wait_and_process()
except RuntimeError as e:
self.logging.error("Exception caught: {}".format(e))
self._run_task_list(self.async_pomp_task)
self._run_task_list(self.deferred_pomp_task)
finally:
self.running = False
# Perform some cleanup before this thread dies
self._cleanup()
self.destroy()
    def _wait_and_process(self):
        # Run one pomp loop iteration, blocking at most pomptimeout_ms.
        od.pomp_loop_wait_and_process(self.pomp_loop, self.pomptimeout_ms)
    def _wake_up(self):
        # Signal the wakeup event so _wait_and_process returns promptly.
        if self.wakeup_evt:
            od.pomp_evt_signal(self.wakeup_evt)
    def add_fd_to_loop(self, fd, cb, fd_events, userdata=None):
        """Thread-safely add `fd` to the pomp loop; returns a Future."""
        return self.run_async(self._add_fd_to_loop, fd, cb, fd_events, userdata=userdata)
    def has_fd(self, fd):
        """Return True if `fd` is registered in the loop.

        Runs the check on the loop thread; returns False if the loop does
        not answer within 5 seconds.
        """
        try:
            return self.run_async(self._has_fd, fd).result_or_cancel(timeout=5)
        except concurrent.futures.TimeoutError:
            return False
def _has_fd(self, fd):
return bool(od.pomp_loop_has_fd(self.pomp_loop, fd) == 1)
def _add_fd_to_loop(self, fd, cb, fd_events, userdata=None):
if cb is None:
self.logging.info(
"Cannot add fd '{}' to pomp loop without "
"a valid callback function".format(fd)
)
return None
self.fd_userdata[fd] = userdata
userdata = ctypes.cast(
ctypes.pointer(ctypes.py_object(userdata)), ctypes.c_void_p
)
self.c_fd_userdata[fd] = userdata
self.pomp_fd_callbacks[fd] = od.pomp_fd_event_cb_t(cb)
res = od.pomp_loop_add(
self.pomp_loop,
ctypes.c_int32(fd),
od.uint32_t(int(fd_events)),
self.pomp_fd_callbacks[fd],
userdata
)
if res != 0:
raise RuntimeError(
"Cannot add fd '{}' to pomp loop: {} ({})".format(
fd, os.strerror(-res), res)
)
    def remove_fd_from_loop(self, fd):
        """Thread-safely remove `fd` from the pomp loop; returns a Future."""
        return self.run_async(self._remove_fd_from_loop, fd)
def _remove_fd_from_loop(self, fd):
self.fd_userdata.pop(fd, None)
self.c_fd_userdata.pop(fd, None)
if self.pomp_fd_callbacks.pop(fd, None) is not None:
if od.pomp_loop_remove(self.pomp_loop, fd) != 0:
self.logging.error("Cannot remove fd '{}' from pomp loop".format(fd))
return False
return True
def add_event_to_loop(self, *args, **kwds):
"""
Add a pomp event to the loop
"""
self.run_async(self._add_event_to_loop, *args, **kwds)
def _add_event_to_loop(self, pomp_evt, cb, userdata=None):
evt_id = id(pomp_evt)
self.pomp_events[evt_id] = pomp_evt
self.pomp_event_callbacks[evt_id] = od.pomp_evt_cb_t(cb)
self.evt_userdata[evt_id] = userdata
userdata = ctypes.cast(
ctypes.pointer(ctypes.py_object(userdata)), ctypes.c_void_p
)
self.c_evt_userdata[evt_id] = userdata
res = od.pomp_evt_attach_to_loop(
pomp_evt, self.pomp_loop, self.pomp_event_callbacks[evt_id], userdata
)
if res != 0:
raise RuntimeError("Cannot add eventfd to pomp loop")
def remove_event_from_loop(self, *args, **kwds):
"""
Remove a pomp event from the loop
"""
self.run_later(self._remove_event_from_loop, *args, **kwds)
def _remove_event_from_loop(self, pomp_evt):
evt_id = id(pomp_evt)
self.evt_userdata.pop(evt_id, None)
self.c_evt_userdata.pop(evt_id, None)
self.pomp_event_callbacks.pop(evt_id, None)
if self.pomp_events.pop(evt_id, None) is not None:
if od.pomp_evt_detach_from_loop(pomp_evt, self.pomp_loop) != 0:
self.logging.error('Cannot remove event "%s" from pomp loop' % evt_id)
def _destroy_pomp_loop_fds(self):
evts = list(self.pomp_events.values())[:]
for evt in evts:
self._remove_event_from_loop(evt)
fds = list(self.pomp_fd_callbacks.keys())[:]
for fd in fds:
self._remove_fd_from_loop(fd)
    def _create_pomp_loop(self):
        """Create the underlying pomp loop; raises RuntimeError on failure."""
        self.logging.info("Creating pomp loop")
        self.pomp_loop = od.pomp_loop_new()
        if self.pomp_loop is None:
            raise RuntimeError("Cannot create pomp loop")
def _destroy_pomp_loop(self):
if self.pomp_loop is not None:
res = od.pomp_loop_destroy(self.pomp_loop)
if res != 0:
self.logging.error(
"Error while destroying pomp loop: {}".format(res))
return False
else:
self.logging.info("Pomp loop has been destroyed")
self.pomp_loop = None
return True
def create_timer(self, callback):
self.logging.info("Creating pomp timer")
pomp_callback = od.pomp_timer_cb_t(lambda *args: callback(*args))
pomp_timer = od.pomp_timer_new(self.pomp_loop, pomp_callback, None)
if pomp_timer is None:
raise RuntimeError("Unable to create pomp timer")
self.pomp_timers[id(pomp_timer)] = pomp_timer
self.pomp_timer_callbacks[id(pomp_timer)] = pomp_callback
return pomp_timer
def set_timer(self, pomp_timer, delay, period):
res = od.pomp_timer_set_periodic(pomp_timer, delay, period)
return res == 0
def clear_timer(self, pomp_timer):
res = od.pomp_timer_clear(pomp_timer)
return res == 0
def destroy_timer(self, pomp_timer):
if id(pomp_timer) not in self.pomp_timers:
return False
res = od.pomp_timer_destroy(pomp_timer)
if res != 0:
self.logging.error(
"Error while destroying pomp loop timer: {}".format(res))
return False
else:
del self.pomp_timers[id(pomp_timer)]
del self.pomp_timer_callbacks[id(pomp_timer)]
self.logging.info("Pomp loop timer has been destroyed")
return True
def _destroy_pomp_loop_timers(self):
pomp_timers = list(self.pomp_timers.values())[:]
for pomp_timer in pomp_timers:
self.destroy_timer(pomp_timer)
    def register_cleanup(self, fn):
        # Registered callables are presumably invoked by _cleanup() during
        # loop shutdown (see run()); _cleanup is not visible here — verify.
        self.cleanup_functions.append(fn)
def unregister_cleanup(self, fn, ignore_error=False):
try:
self.cleanup_functions.remove(fn)
except ValueError:
| |
"""
Operative and Processing Interface - OPI module of SOSim application.
Copyright (C) 2019 <NAME> & <NAME>
This file is part of SOSim - Subsurface Oil Simulator.
SOSim is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
SOSim is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SOSim. If not, see <http://www.gnu.org/licenses/>.
This code was developed as computational wrapping for a project funded
by a grant from GoMRI.
It is also a Deliverable of PhD Theses and dissertations for PhD degrees of <NAME> and Chao Ji.
University of Miami,
May 2018 - June 2022.
For development support contact <EMAIL>
"""
# Module-level configuration and shared state for the SOSim GUI.
PlayPath = "Backstage"   # working directory for intermediate files
qgis_prefix = "C:/Program Files/QGIS 2.14/apps/qgis-ltr"
SOSimPath = ""           # application root; filled relative paths below
ResultsPath = "Results"  # output directory for simulation results
# myfile = ''
# Lists of files selected by the user in the GUI (reset per session).
myfile_list = []
myfile1_list = []
myfile2_list = []
k_zt = 0
# Cleanup: a duplicate `PlayPath = "Backstage"` assignment and a no-op
# module-level `global cur` statement were removed (neither had any effect).
#__________________________________________________________________________________________________________________________
# Imports:
import sys
import os
import re
import math
import numpy
import calendar
import string
import time
import shutil
from math import *
from numpy import *
import pickle
from qgis.core import *
from qgis.gui import *
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import SOSimgui as SOSimgui
from PyQt4.QtCore import QFileInfo,QSettings
from ui_Options import Ui_MyDialog
import SOSimsubmerged as submerged
import SOSimsunken as sunken
from netCDF4 import Dataset
import numpy as np
import cv2
#___________________________________________________________________________________________________________________________
# Global Functions:
def LatLongConverter(longitude, latitude):
    """Convert WGS84 geographic coordinates to UTM.

    Args:
        longitude: Longitude in decimal degrees (negative = west).
        latitude: Latitude in decimal degrees (negative = south).

    Returns:
        [easting_km, northing_km, zone]: UTM easting and northing in
        kilometres plus the UTM longitudinal zone number.
    """
    lon = longitude
    lat = latitude
    # WGS84 datum constants (unused intermediate constants from the
    # original implementation were removed):
    a = 6378137.0                 # equatorial radius (m)
    b = 6356752.314               # polar radius (m)
    k0 = 0.9996                   # UTM scale factor on the central meridian
    ecc = sqrt(1.0-(b/a)**2.0)    # first eccentricity
    ecc2 = ecc*ecc/(1.0-ecc*ecc)  # second eccentricity squared (e'^2)
    n = (a-b)/(a+b)
    # Meridional arc series coefficients:
    A0 = a*(1.0-n+(5.0*n*n/4.0)*(1.0-n) +(81.0*n**4.0/64.0)*(1.0-n))
    B0 = (3.0*a*n/2.0)*(1.0 - n - (7.0*n*n/8.0)*(1.0-n) + 55.0*n**4.0/64.0)
    C0 = (15.0*a*n*n/16.0)*(1.0 - n +(3.0*n*n/4.0)*(1.0-n))
    D0 = (35.0*a*n**3.0/48.0)*(1.0 - n + 11.0*n*n/16.0)
    # BUG fix: the sin(8*lat) coefficient is 315*a*n^4/512 in the standard
    # series; the previous /51 is a typo propagated from a widely copied
    # reference (numerically the difference is sub-millimetre).
    E0 = (315.0*a*n**4.0/512.0)*(1.0-n)
    # UTM zone and angular offset from its central meridian.
    # BUG fix: lon == 0.0 previously matched neither branch and raised
    # UnboundLocalError; 0 degrees east belongs to zone 31.
    if lon >= 0.0:
        zone = int(lon/6)+31
    else:
        zone = int((180+lon)/6)+1
    lonCM = (6*zone)-183
    Dlon = (lon-lonCM)*pi/180.0
    latrad = lat*pi/180.0
    curvatureR2 = a/((1.0-(ecc*sin(latrad))**2.0)**(1.0/2.0))
    MeridArc = A0*latrad - B0*sin(2.0*latrad) + C0*sin(4.0*latrad) - D0*sin(6.0*latrad) + E0*sin(8.0*latrad)
    # Series terms: k1..k3 build the northing, k4..k5 the easting.
    k1 = MeridArc*k0
    # BUG fix: the k2 term must also carry the k0 scale factor like every
    # other series term (previously omitted; ~0.1 m error mid-zone).
    k2 = curvatureR2*sin(latrad)*cos(latrad)*k0/2.0
    k3 = ((curvatureR2*sin(latrad)*cos(latrad)**3.0)/24.0)*(5.0-tan(latrad)**2.0+9.0*ecc2*cos(latrad)**2.0+4.0*ecc2**2.0*cos(latrad)**4.0)*k0
    k4 = curvatureR2*cos(latrad)*k0
    k5 = (cos(latrad))**3.0*(curvatureR2/6.0)*(1.0-tan(latrad)**2.0+ecc2*cos(latrad)**2.0)*k0
    rawNorth = k1+(k2*Dlon**2.0)+(k3*Dlon**4.0)
    # Southern hemisphere: apply the 10,000 km false northing.
    if rawNorth < 0.0:
        North = 10000000.0 + rawNorth
    else:
        North = rawNorth
    East = 500000.0+(k4*Dlon)+(k5*Dlon**3.0)
    return [East/1000.0, North/1000.0, zone]  # in km.
def UTMConverter(easting, northing, zone):
    """Convert UTM coordinates to WGS84 geographic coordinates.

    Args:
        easting: UTM easting in metres.
        northing: UTM northing in metres.
        zone: UTM longitudinal zone number.

    Returns:
        [lon, lat] in decimal degrees.
    """
    # WGS84 datum constants:
    a = 6378137.0                 # equatorial radius (m)
    b = 6356752.314               # polar radius (m)
    k0 = 0.9996                   # UTM scale factor
    ecc = sqrt(1.0-(b/a)**2.0)    # first eccentricity
    ecc2 = ecc*ecc/(1.0-ecc*ecc)  # second eccentricity squared (e'^2)
    # Footprint-latitude series constants:
    e1 = (1.0-(1.0-ecc*ecc)**(1.0/2.0))/(1.0+(1.0-ecc*ecc)**(1.0/2.0))
    C1 = 3.0*e1/2.0-27.0*e1**3.0/32.0
    C2 = 21.0*e1**2.0/16.0-55.0*e1**4.0/32.0
    C3 = 151.0*e1**3.0/96.0
    C4 = 1097.0*e1**4.0/512.0
    # NOTE(review): southern-hemisphere inputs are expected here as
    # *negative* northings — confirm against callers before relying on it.
    if northing >= 0.0:
        corrNorth = northing
    else:
        corrNorth = 10000000.0 - northing
    eastPrime = 500000.0 - easting
    # BUG fix: the meridional arc must use the hemisphere-corrected
    # northing (corrNorth was previously computed but never used).
    arcLength = corrNorth/k0
    mu = arcLength/(a*(1.0-ecc**2.0/4.0-3.0*ecc**4.0/64.0-5.0*ecc**6.0/256.0))
    footprintLat = mu+C1*sin(2.0*mu)+C2*sin(4.0*mu)+C3*sin(6.0*mu)+C4*sin(8.0*mu)
    K1 = ecc2*cos(footprintLat)**2.0   # e'^2 cos^2(fp) — "C1" in textbooks
    T1 = tan(footprintLat)**2.0
    N1 = a/(1.0-(ecc*sin(footprintLat))**2.0)**(1.0/2.0)
    R1 = a*(1.0-ecc*ecc)/(1.0-(ecc*sin(footprintLat))**2.0)**(3.0/2.0)
    D = eastPrime/(N1*k0)
    # Coefficients for calculating latitude.
    # BUG fix: the D^4 and D^6 terms take the curvature term K1
    # (= e'^2 cos^2), not the footprint series constant C1, and the D^4
    # term enters with a minus sign (standard inverse transverse Mercator
    # expansion; the previous mix-up gave small position errors).
    coef1 = N1*tan(footprintLat)/R1
    coef2 = D*D/2.0
    coef3 = (5.0+3.0*T1+10.0*K1-4.0*K1*K1-9.0*ecc2)*D**4.0/24.0
    coef4 = (61.0+90.0*T1+298.0*K1+45.0*T1*T1-252.0*ecc2-3.0*K1*K1)*D**6.0/720.0
    # Coefficients for calculating longitude (same K1 fix):
    coef5 = D
    coef6 = (1.0+2.0*T1+K1)*D**3.0/6.0
    coef7 = (5.0-2.0*K1+28.0*T1-3.0*K1**2.0+8.0*ecc2+24.0*T1**2.0)*D**5.0/120.0
    deltalong = (coef5-coef6+coef7)/cos(footprintLat)
    zoneCM = 6.0*zone-183.0
    lat = 180.0*(footprintLat-coef1*(coef2-coef3+coef4))/pi
    # Mirror the latitude for southern-hemisphere (negative) inputs.
    if northing < 0.0:
        lat = -lat
    lon = zoneCM - deltalong*180.0/pi
    return [lon, lat]
def CalTime(a, b):
    """Return the elapsed time from `a` to `b` in (fractional) days.

    Args:
        a, b: timestamp strings formatted '%Y-%m-%d %H:%M:%S'.
    """
    # BUG fix: `datetime` was never imported at module level, so this
    # function always raised NameError; import it locally here.
    import datetime
    start = datetime.datetime.strptime(a, '%Y-%m-%d %H:%M:%S')
    ends = datetime.datetime.strptime(b, '%Y-%m-%d %H:%M:%S')
    diff = ends - start
    return diff.total_seconds()/86400.
#__________________________________________________________________________________________________________________________
# QApplication Object:
# Application version string (major.minor.patch).
__version__ = "1.0.0"
#__________________________________________________________________________________________________________________________
# MAC platform accessibility:
#__________________________________________________________________________________________________________________________
class SOSimMainWindow(QMainWindow, SOSimgui.Ui_SOSimMainWindow):
"""This class represents the main window and inherits from both the QMainWindow widget and the QtDesigner file."""
    def __init__(self, parent=None):
        """Build the main window: map canvases, toolbars, map tools, and
        the initial state variables handed to the computation core."""
        super(SOSimMainWindow, self).__init__(parent)
        # Reset the module-level file-selection lists for this window.
        global myfile_list
        global myfile1_list
        global myfile2_list
        myfile_list = []
        myfile1_list = []
        myfile2_list = []
        # Collected GUI inputs, grouped by widget category.
        self.ourinformation = {}
        self.ourinformation['CampaignButton'] = []
        self.ourinformation['OurTime'] = []
        self.ourinformation['HydroButton'] = []
        # All in ui_SOMSim.py gets imported and GUI initialized:
        self.setupUi(self)
        self.popDialog = myDialog()
        # Create map canvas:
        self.canvas = QgsMapCanvas()
        self.canvas.setCanvasColor(QColor(0,0,140))
        self.canvas.enableAntiAliasing(True)
        self.canvas.show()
        # Add the canvas to its framed layout created with QtDesigner:
        self.LayoutMap.addWidget(self.canvas)
        # Create global, small map canvas:
        self.globalcanvas = QgsMapCanvas()
        self.globalcanvas.setCanvasColor(QColor(0,0,140))
        self.globalcanvas.enableAntiAliasing(True)
        self.globalcanvas.show()
        # Add the global, small canvas to its framed layout created with QtDesigner:
        self.LayoutGlobal.addWidget(self.globalcanvas)
        # Create canvas for the variable legend:
        self.legendcanvas = QgsMapCanvas()
        self.legendcanvas.setCanvasColor(QColor(250,250,250))
        self.legendcanvas.enableAntiAliasing(True)
        self.legendcanvas.show()
        # Add the legend canvas to its framed layout created with QtDesigner:
        self.LayoutLegend.addWidget(self.legendcanvas)
        #______________________new legend______________
        self.outlegendcanvas = QgsMapCanvas()
        self.outlegendcanvas.setCanvasColor(QColor(250,250,250))
        self.outlegendcanvas.enableAntiAliasing(True)
        self.outlegendcanvas.show()
        self.LayoutLegendHor.addWidget(self.outlegendcanvas)
        #______________________________________________
        # create the actions behaviours
        self.connect(self.actionAddLayer, SIGNAL("triggered()"), self.addRasterImage)
        self.connect(self.actionZoomIn, SIGNAL("triggered()"), self.zoomIn)
        self.connect(self.actionZoomOut, SIGNAL("triggered()"), self.zoomOut)
        self.connect(self.actionPan, SIGNAL("triggered()"), self.pan)
        self.connect(self.actionCaptureCoordinates, SIGNAL("triggered()"), self.captureCoords)
        self.connect(self.actionSave_Image, SIGNAL("triggered()"), self.fileSaveAsImage)
        self.connect(self.actionSave_Calibration_As, SIGNAL("triggered()"), self.fileSaveCalibrationAs)
        self.connect(self.actionCurrent_Image, SIGNAL("triggered()"), self.filePrint)
        self.connect(self.actionQuit, SIGNAL("triggered()"), self.fileQuit)
        self.connect(self.actionNew, SIGNAL("triggered()"), self.fileNew)
        self.connect(self.actionExisting_Output_Image, SIGNAL("triggered()"), self.addRasterImage)
        self.connect(self.actionOpen, SIGNAL("triggered()"), self.addRasterImage)
        self.connect(self.actionSave, SIGNAL("triggered()"), self.fileSave)
        self.connect(self.actionDefaultSettings, SIGNAL("triggered()"), self.optionsDefSettings)
        # create file toolbar:
        self.fileToolbar = self.addToolBar("File");
        self.fileToolbar.setObjectName("FileToolBar")
        self.fileToolbar.addAction(self.actionOpen)
        self.fileToolbar.addAction(self.actionSave)
        self.fileToolbar.addAction(self.actionNew)
        # create map toolbar and place it to the right of the canvas:
        self.mapToolbar = self.addToolBar("Map") #changed by the following line to put it vertical
        self.mapToolbar.setObjectName("MapToolBar")
        self.mapToolbar.addAction(self.actionAddLayer)
        self.mapToolbar.addAction(self.actionCaptureCoordinates)
        self.mapToolbar.addAction(self.actionPan)
        self.mapToolbar.addAction(self.actionZoomIn)
        self.mapToolbar.addAction(self.actionZoomOut)
        # Create the map tools
        self.toolPan = QgsMapToolPan(self.canvas)
        self.toolPan.setAction(self.actionPan)
        self.toolZoomIn = QgsMapToolZoom(self.canvas, False) # false = in
        self.toolZoomIn.setAction(self.actionZoomIn)
        self.toolZoomOut = QgsMapToolZoom(self.canvas, True) # true = out
        self.toolZoomOut.setAction(self.actionZoomOut)
        self.toolCaptureCoordinates = QgsMapToolEmitPoint(self.canvas)
        self.toolCaptureCoordinates.setAction(self.actionCaptureCoordinates)
        #Scale options
        self.ScaleFrame.hide()
        self.SureButton.hide()
        # Nodes options (frame shown only for user-defined nodes):
        self.NodesFrame.hide()
        self.connect(self.UserDefinedNodesRadioButton, SIGNAL("toggled(bool)"), self.NodesFrame, SLOT("setVisible(bool)"))
        # Layerset:
        self.layers = []
        # Show the world base map and base legend:
        self.MyWorldLayer(ext = True)
        self.MyWorldLayerGlobalCanvas()
        self.MyLegendLoad(SOSimPath+"Data/LegendLandOcean.jpg", "LegendLandOcean.jpg")
        # Other missing in ui:
        self.lineEdit.setAlignment(Qt.AlignHCenter)
        self.lineEdit.setText(str(0.0)+ " , " +str(0.0))
        self.RecalcButton.setEnabled(True)
        self.NodataButton.setVisible(False)
        self.UTMButton.setVisible(False)
        self.DecimalButton.setVisible(False)
        #___________________________________________________________________________________________________________________
        # FOR THE CORE: passing variables:
        # Spill origin in WGS (lon0/lat0) and UTM (x0/y0/zone0) coordinates.
        self.lon0 = 0.0
        self.lat0 = 0.0
        self.x0 = 0.0
        self.y0 = 0.0
        self.zone0 = 1
        # Per-campaign data lists (x, y, concentration, UTM zone).
        self.DLx = [[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0]]
        self.DLy = [[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0]]
        self.DLcon = [[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0]]
        self.DLzone = [[0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0]]
        self.st = []
        self.allCampaignIndices = []
        # NOTE(review): this stores the *type* `bool` (which is truthy),
        # not a boolean value — confirm whether False was intended.
        self.importedCalibration = bool
        self.sx0 = 0.050 # in km, = to 50 meters.
        self.sy0 = 0.050
        self.TimeSamp = zeros(4)
        self.t = []
        self.filename = None
        self.admST = 0
        self.retardation = 0.5
        self.sunkx0 = 0.0
        self.sunky0 = 0.0
        self.x_max = 0.0
        self.x_min = 0.0
        self.y_max = 0.0
        self.y_min = 0.0
        # Canvas markers and user click history:
        self.markers = []
        self.markersglobal = []
        self.northmarker = []
        self.xclicks = []
        self.yclicks = []
        self.openOcean = False
        #self.morning = [self.t[0], 0] #### REVISE ACCORDING TO MIN(ST)
        self.morning = [0, 0]
        self.genForward = self.nextTimeGenerator()
        #self.evening = [self.t[len(self.t)-1], len(self.t)]
        self.evening = [0, 0]
        self.genBackward = self.prevTimeGenerator()
        # FOr passing from Run method to Recalculate method:
        self.LikelihoodFunction = []
        self.args = []
        # To self.RecalcButton.setEnabled(True) and to keep print settings:
        self.printer = None
        #print len(self.layers)
#_____________________________________________________________________________________________________________________
# Methods to print, save image, etc.
    def optionsDefSettings(self):
        """Show the default-settings (Options) dialog."""
        self.popDialog.show()
    def filePrint(self):
        """Print the current map canvas through a printer dialog."""
        if self.printer is None:
            # Lazily create the printer so settings persist between prints.
            self.printer = QPrinter(QPrinter.HighResolution)
            self.printer.setPageSize(QPrinter.Letter)
        form = QPrintDialog(self.printer, self)
        if form.exec_():
            painter = QPainter(self.printer)
            rect = painter.viewport()
            # Scale the canvas to the printable area, keeping aspect ratio.
            size = self.canvas.size()
            size.scale(rect.size(), Qt.KeepAspectRatio)
            painter.setViewport(rect.x(), rect.y(), size.width(), size.height())
            # NOTE(review): drawImage is handed the canvas *widget*, not a
            # QImage — confirm this actually renders as expected.
            painter.drawImage(0, 0, self.canvas)
    def fileQuit(self):
        """Close every application window, terminating the program."""
        QApplication.closeAllWindows()
def fileNew(self):
SOSimMainWindow().show()
def fileSave(self):
what = QMessageBox.question(self, "SOSim - Save %s" % self.spillName(),
"You are about to save your project's current calibration file. Press 'No' if you whish to save your output map instead.",
QMessageBox.Yes|QMessageBox.No)
what
if what == QMessageBox.Yes:
self.fileSaveCalibrationAs()
if what == QMessageBox.No:
self.fileSaveAsImage()
def fileSaveCalibrationAs(self):
self.filename = "YourCalibration %s.txt" % self.spillName()
fname = self.filename if self.filename is not None else "."
format = "*.txt"
fname = unicode(QFileDialog.getSaveFileName(self,
"SOSim - Save Calibration As...",
fname,
"Calibration files (%s)" % " ".join(format)))
if fname:
if "." not in fname:
fname | |
"""Implements a class for Latin Square puzzles.
A Latin Square is a square grid of numbers from 1..N, where a number may not
be repeated in the same row or column. Such squares form the basis of puzzles
like Sudoku, Kenken(tm), and their variants.
Classes:
LatinSquare: Implements a square puzzle constrained by not repeating
values in the same row or column.
Functions:
build_empty_grid: Build a 2D array (list of lists) for puzzle.
    char2int: Convert between character and integer representation of a cell value.
int2char: Reverse of char2int.
count_clues: Given a string or 2D array representing a puzzle, return
the number of starting clues in the puzzle.
from_string: Given a string representing a puzzle, return the 2D array
equivalent. All class methods expect the array version.
"""
# Default puzzle is the classic 9x9 grid.
DEFAULT_PUZZLE_SIZE = 9
# Empty cells are stored as None (falsy, so `if cell:` skips them).
EMPTY_CELL = None
# Have only tested up to 25x25 so set that max size here
# higher values may work but aren't tested
MAX_PUZZLE_SIZE = 25
MIN_PUZZLE_SIZE = 1
MIN_CELL_VALUE = 1 # 0 evals to False so can be confused with EMPTY_CELL
# One display character per possible cell value (A=10 ... P=25).
CELL_VALUES = "123456789ABCDEFGHIJKLMNOP"
assert MAX_PUZZLE_SIZE == len(CELL_VALUES)
def build_empty_grid(grid_size):
    """Builds a 2D array grid_size * grid_size, each cell element is None.

    Args:
        grid_size: Width/height of the square grid.

    Returns:
        A list of grid_size rows, each a distinct list of grid_size
        EMPTY_CELL values.
    """
    assert MIN_PUZZLE_SIZE <= grid_size <= MAX_PUZZLE_SIZE
    # Idiom: a nested comprehension replaces the previous build-empty-
    # then-overwrite loop; behavior is unchanged.
    return [[EMPTY_CELL for _ in range(grid_size)] for _ in range(grid_size)]
def char2int(char):
    """Converts character char to an int representation."""
    # Both "." and "0" denote an empty cell.
    return EMPTY_CELL if char in (".", "0") else CELL_VALUES.index(char) + 1
def int2char(value):
    """Converts back from an int value to character value for a cell."""
    # Falsy (None / 0) means the cell is empty.
    if value:
        return CELL_VALUES[value - 1]
    return "."
def count_clues(puzzle_grid):
    """Counts clues in a puzzle_grid, which can be a list of lists or string."""
    if isinstance(puzzle_grid, list):
        # Grid form: every truthy cell is a clue (empty cells are None).
        return sum(1 for row in puzzle_grid for cell in row if cell)
    # String form: every non-"." character is a clue.
    return len(puzzle_grid) - puzzle_grid.count(".")
def from_string(puzzle_string):
    """Takes a string and converts it to a list of lists of integers.

    One character per cell, period = empty, uppercase letters for values
    >= 10 (A=10, B=11, ...); trailing blanks are stripped.

    Returns:
        A list of lists of ints.

    Raises:
        ValueError: length is not a perfect square in the supported size
            range, or a character encodes a value outside [1:grid_size].
    """
    s = puzzle_string.rstrip()
    grid_size = int(len(s) ** (1 / 2))
    if not MIN_PUZZLE_SIZE <= grid_size <= MAX_PUZZLE_SIZE:
        raise ValueError(f"puzzle_string {grid_size}x{grid_size} is out of range")
    if grid_size ** 2 != len(s):
        raise ValueError(f"puzzle_string {grid_size}x{grid_size} is not a square")
    grid = build_empty_grid(grid_size)
    for i, ch in enumerate(s):
        v = char2int(ch)
        if not v:
            continue
        if not MIN_CELL_VALUE <= v <= grid_size:
            raise ValueError(f"Cell value {v} at {i} out of range [1:{grid_size}]")
        grid[i // grid_size][i % grid_size] = v
    return grid
class LatinSquare:
"""Implements a Latin Square "puzzle".
A Latin Square is a 2D matrix where the values in each cell cannot be
repeated in the same row or column.
Dimensions are always square (ie. width==height==grid_size). If no
values are passed to constructor, will build an empty grid of size
DEFAULT_PUZZLE_SIZE (9).
Attributes:
size: Dimensions of the square (length, height) in a tuple.
num_cells: Total number of cells (grid_size * grid_size)
max_value: Equal to grid_size, it's the max value of a cell, and
also the grid's length and height.
complete_set: Set of values from [1..max_value] that must exist once
in each row and column in a solved puzzle.
Args:
starting_grid: A list of lists of integers (2D array of ints).
Pass None to start with an empty grid.
grid_size: The number of cells for the width and height of the
grid. Default value is 9, for a 9x9 grid (81 cells). If not
set, size is set to len(starting_grid), otherwise must be
consistent with len(starting_grid) as a check for "bad" data.
Raises:
ValueError: An inconsistency exists in the starting_grid;
or the grid_size is too small or too large (1 to 25)
"""
def __init__(self, grid_size=None, starting_grid=None):
# If a starting_grid is passed, that sets the size
if starting_grid and grid_size:
if len(starting_grid) != grid_size:
raise ValueError(f"starting_grid is not {grid_size}x{grid_size}")
elif starting_grid:
grid_size = len(starting_grid)
elif grid_size is None:
grid_size = DEFAULT_PUZZLE_SIZE
if not MIN_PUZZLE_SIZE <= grid_size <= MAX_PUZZLE_SIZE:
raise ValueError(
f"grid_size={grid_size} outside [{MIN_PUZZLE_SIZE}:{MAX_PUZZLE_SIZE}]"
)
# Attributes
self.size = (grid_size, grid_size)
self.num_cells = grid_size * grid_size
self.max_value = grid_size
self.complete_set = set(range(MIN_CELL_VALUE, grid_size + 1))
# Protected
self._grid = build_empty_grid(grid_size)
self.__num_empty_cells = grid_size * grid_size
# Initialize constraints
self.__allowed_values_for_row = [
set(self.complete_set) for i in range(grid_size)
]
self.__allowed_values_for_col = [
set(self.complete_set) for i in range(grid_size)
]
# Accept a starting puzzle
if starting_grid:
self.init_puzzle(starting_grid)
def init_puzzle(self, starting_grid):
"""Initializes a puzzle grid based on contents of starting_grid.
Clears the existing puzzle and resets internal state (e.g. count of
empty cells remaining).
Args:
starting_grid: A list of lists of integers (2D array of ints).
To help catch data errors, must be the same size as what the
instance was initialized for.
Raises:
ValueError: Size of starting_grid (len) is not what was expected
from the initial grid_size; or constraint on cell values is
violated (e.g. dupicate value in a row)
"""
self.clear_all()
# Check that new grid is correct number of rows
if len(starting_grid) != self.max_value:
raise ValueError(f"Exepect {self.max_value} rows, got {len(starting_grid)}")
# Check that new grid has correct number of cols
for x, row in enumerate(starting_grid):
if len(row) != self.max_value:
raise ValueError(
f"Expect {self.max_value} columns in row {x}, got {len(row)}")
for y, val in enumerate(row):
if val:
self.set(x, y, val)
    def num_empty_cells(self):
        """Returns the number of empty cells remaining."""
        # Maintained incrementally by set() and clear().
        return self.__num_empty_cells
    def get(self, x, y):
        """Returns the cell value at (x, y)"""
        # Empty cells read back as EMPTY_CELL (None); out-of-range x or y
        # raises IndexError from the list indexing.
        return self._grid[x][y]
def set(self, x, y, value):
"""Sets the call at x,y to value
The set operation must obey the rules of the contraints. In this class
- no value can be repeated in a row
- no value can be repeated in a column
If a constraint is violated then a ValueError exception is raised.
Args:
x, y: Cell position in row, column order.
value: Integer value to write into the cell.
Raises:
ValueError: Cell value out of range [1:max_value]
IndexError: x,y location out of range [0:max_value-1]
"""
if value < MIN_CELL_VALUE or value > self.max_value:
raise ValueError(f"Value {value} out of range [{MIN_CELL_VALUE}:{self.max_value}]")
if self._grid[x][y] == value:
return
# Clear value first to update constraints
if self._grid[x][y]:
self.clear(x, y)
# Write value if allowed
if value in self.get_allowed_values(x, y):
self._grid[x][y] = value
self.__num_empty_cells -= 1
else:
raise ValueError(f"Value {value} not allowed at {x},{y}")
# Update constraints
self.__allowed_values_for_row[x].remove(value)
self.__allowed_values_for_col[y].remove(value)
def clear(self, x, y):
"""Clears the value for a cell at x,y and update constraints"""
# Is OK to "clear" an already empty cell (no-op)
if self._grid[x][y] == EMPTY_CELL:
return
# Stash previous value before clearing, to update constraints
prev = self._grid[x][y]
self._grid[x][y] = EMPTY_CELL
self.__num_empty_cells += 1
# Put previous value back into allowed list
self.__allowed_values_for_row[x].add(prev)
self.__allowed_values_for_col[y].add(prev)
def clear_all(self):
"""Clears the entire puzzle grid"""
for x in range(self.max_value):
for y in range(self.max_value):
self.clear(x, y)
def is_empty(self, x, y):
"""Returns True if the cell is empty"""
return self._grid[x][y] == EMPTY_CELL
def find_empty_cell(self):
"""Returns the next empty cell as tuple (x, y)
Search starts at 0,0 and continues along the row. Returns at the first
empty cell found. Returns empty tuple if no empty cells left.
"""
for x, row in enumerate(self._grid):
for y, v in enumerate(row):
if not v:
return (x, y)
return ()
def next_empty_cell(self):
"""Generator that returns the next empty cell that exists in the grid
Search starts at 0,0, just like `find_empty_cell`. However each
subsequent call will resume where the previous invocation left off
(assuming this is being called as a generator function). Returns an
empty tuple at the end of the list.
"""
for x, row in enumerate(self._grid):
for | |
################################################################################
# Copyright (c) 2017 <NAME>, <NAME>, <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
################################################################################
"""@package python_scripts
A python for tuning based on fuzzing and Bayesian Optimisation
"""
import json
import sys
import os
import math
import socket
import pickle
from termcolor import colored
from time import time
from random import randrange, uniform, choice, random, shuffle, seed
from collections import OrderedDict
from copy import deepcopy
# optimization packages
from bayes_opt import BayesianOptimization
from simanneal import Annealer
# my packages
from run_sut_stress import SutStress
from common import ExperimentInfo, DataLog
class ConfigurableEnemy:
    """
    Holds all information about an enemy process: the C template it is built
    from, the JSON-described parameter ranges, and the concrete parameter
    values (defines) chosen for this instance.
    """

    def __init__(self, template=None, data_file=None):
        """
        Initialise an enemy with template file and template data.

        :param template: The C template file that contains the enemy process
        :param data_file: The JSON file containing the maximum data range for the template
        """
        self._t_file = template
        self._d_file = data_file
        # BUGFIX: initialise state *before* reading the range data. The
        # previous code assigned _define_range = None after calling
        # _read_range_data(), clobbering any ranges just read from data_file.
        self._define_range = None
        self._defines = dict()
        self._read_range_data()

    def __str__(self):
        """
        :return: Template name and defines
        """
        string = "Template: " + str(self._t_file)
        for key in self._defines:
            string += "\t" + str(key) + " " + str(self._defines[key])
        string += "\n"
        return string

    def set_template(self, template_file, data_file):
        """
        Set the template C file and the JSON with the data ranges, then
        randomly instantiate the defines for the new template.

        :param template_file: Template C file
        :param data_file: JSON file with data range
        :return:
        """
        self._t_file = template_file
        self._d_file = data_file
        self._read_range_data()
        self.random_instantiate_defines()

    def get_template(self):
        """
        :return: Get the enemy template file
        """
        return self._t_file

    def get_defines_range(self):
        """
        Return the define dictionary, the way BO wants it

        :return: A dictionary with param as keyword and a tuple with (min,max)
        """
        data_range = {}
        for param in self._define_range:
            min_val = self._define_range[param]["range"][0]
            max_val = self._define_range[param]["range"][1]
            data_range[str(param)] = (min_val, max_val)
        return data_range

    def _read_range_data(self):
        """
        Read the template JSON data from the d_file and store it in
        _define_range. No-op when no template is configured.

        :return:
        """
        if self._t_file is None:
            return
        # Read the configuration in the JSON file
        with open(self._d_file) as data_file:
            template_object = json.load(data_file)
        try:
            self._define_range = template_object["DEFINES"]
        except KeyError:
            print("Unable to find DEFINES in JSON")

    def set_defines(self, defines):
        """
        Store the given defines, coercing each value to the type declared in
        the range data (workaround to force BO to generate int when needed).

        :param defines: A dict of param name -> value
        :return:
        """
        def_param = dict()
        for key in defines:
            if self._define_range[key]["type"] == "int":
                def_param[key] = int(defines[key])
            elif self._define_range[key]["type"] == "float":
                def_param[key] = float(defines[key])
            else:
                print("Unknown data type for param " + str(key))
                sys.exit(1)
        self._defines = def_param

    def get_defines(self):
        """
        :return: A dict of defines
        """
        return self._defines

    def random_instantiate_defines(self):
        """
        Instantiate the template with random values

        :return:
        """
        self._defines = {}
        for param in self._define_range:
            min_val = self._define_range[param]["range"][0]
            max_val = self._define_range[param]["range"][1]
            # NOTE(review): randrange excludes max_val while uniform includes
            # it — confirm the JSON int ranges are intended as half-open.
            if self._define_range[param]["type"] == "int":
                self._defines[param] = randrange(min_val, max_val)
            elif self._define_range[param]["type"] == "float":
                self._defines[param] = uniform(min_val, max_val)
            else:
                print("Unknown data type for param " + str(param))
                sys.exit(1)

    def neighbour(self):
        """
        Return a deep copy of this enemy with one randomly chosen define
        re-rolled within its declared range.
        """
        random_key = choice(list(self._defines))
        min_val = self._define_range[random_key]["range"][0]
        max_val = self._define_range[random_key]["range"][1]
        temp = deepcopy(self)
        if self._define_range[random_key]["type"] == "int":
            temp._defines[random_key] = randrange(min_val, max_val)
        elif self._define_range[random_key]["type"] == "float":
            temp._defines[random_key] = uniform(min_val, max_val)
        else:
            print("Unknown data type for param " + str(random_key))
            sys.exit(1)
        return temp

    def create_bin(self, output_file):
        """
        Compile the template into an enemy binary, passing the defines on the
        gcc command line.

        :param output_file: The name of the file that will be outputted
        :return:
        """
        defines = ["-D" + d + "=" + str(self._defines[d]) for d in self._defines]
        cmd = "gcc -std=gnu11 -Wall -Wno-unused-variable " + " ".join(defines) + " " \
              + self._t_file + " -lm" + " -o " + output_file
        print("Compiling:", cmd)
        # NOTE: shell command built from local config files (trusted input)
        os.system(cmd)
class EnemyConfiguration:
    """
    Holds the configuration of a full attack: one ConfigurableEnemy per
    enemy core, plus flags describing how enemies are allowed to vary.
    """

    # Known (template C file -> parameter-range JSON) pairs, in fixed order
    def_files = OrderedDict([
        ("../templates/bus/template_bus.c",
         "../templates/bus/parameters.json"),
        ("../templates/cache/template_cache.c",
         "../templates/cache/parameters.json"),
        ("../templates/mem/template_mem.c",
         "../templates/mem/parameters.json"),
        ("../templates/pipeline/template_pipeline.c",
         "../templates/pipeline/parameters.json")
    ])

    def __init__(self, enemy_cores):
        """
        Build a randomly-initialised configuration; every template remains
        possible since none is fixed yet.

        :param enemy_cores: The total number of enemy processes
        """
        self.enemy_cores = enemy_cores
        self.enemies = [ConfigurableEnemy() for _ in range(enemy_cores)]
        # When True, the template is fixed across all enemies
        self.fixed_template = False
        # When True, all enemies share the same parameter values
        self.same_defines = False
        self.random_set_all()

    def __str__(self):
        """
        :return: The template and defines for each core
        """
        return "".join(str(enemy) + "\n" for enemy in self.enemies)

    def set_fixed_template(self, fix_template):
        """
        Set whether the same template is used across all enemies.

        :param fix_template: Boolean variable
        :return:
        """
        self.fixed_template = fix_template

    def set_same_defines(self, same_defines):
        """
        Set whether all enemies share the same defines; when enabling, copy
        the first enemy's defines onto the rest.

        :param same_defines: Boolean variable
        :return:
        """
        self.same_defines = same_defines
        if same_defines:
            shared = self.enemies[0].get_defines()
            for core in range(1, self.enemy_cores):
                self.enemies[core].set_defines(shared)

    def neighbour_template(self):
        """
        Generator yielding configs where one core's template is advanced to
        the next template (cyclic order), visiting cores in shuffled order.
        """
        core_order = list(range(self.enemy_cores))
        shuffle(core_order)
        for core in core_order:
            current = self.enemies[core].get_template()
            ordered_keys = sorted(self.def_files.keys())
            for position, key in enumerate(ordered_keys):
                if key != current:
                    continue
                neighbour_cfg = deepcopy(self)
                next_key = ordered_keys[(position + 1) % len(self.def_files)]
                neighbour_cfg.enemies[core].set_template(next_key, self.def_files[next_key])
                neighbour_cfg.enemies[core].random_instantiate_defines()
                yield neighbour_cfg

    def neighbour_define(self):
        """
        Return a config whose defines differ from this one by a single
        random mutation (applied to every core when same_defines is set).
        """
        mutated = deepcopy(self)
        if self.same_defines:
            shared = mutated.enemies[0].neighbour().get_defines()
            for core in range(0, mutated.enemy_cores):
                mutated.enemies[core].set_defines(shared)
        else:
            victim = randrange(self.enemy_cores)
            mutated.enemies[victim] = self.enemies[victim].neighbour()
        return mutated

    def set_all_templates(self, t_file, t_data_file):
        """
        Assign the same template to every enemy and pin it (fixed_template).

        :param t_file: The template file to be used on all enemy processes
        :param t_data_file: The template data file to be used on all enemy processes
        :return:
        """
        for core in range(self.enemy_cores):
            self.enemies[core].set_template(t_file, t_data_file)
        self.fixed_template = True

    def random_set_all_templates(self):
        """
        Randomly pick a template for each enemy process.

        :return: A dict of assigned templates
        """
        for core in range(self.enemy_cores):
            c_file, json_file = choice(list(EnemyConfiguration.def_files.items()))
            self.enemies[core].set_template(c_file, json_file)
        return self.get_all_templates()

    def random_set_all_defines(self):
        """
        Randomly instantiate the parameters of every enemy, honouring the
        same_defines flag.

        :return:
        """
        if self.same_defines:
            self.enemies[0].random_instantiate_defines()
            shared = self.enemies[0].get_defines()
            for core in range(1, self.enemy_cores):
                self.enemies[core].set_defines(shared)
        else:
            for core in range(self.enemy_cores):
                self.enemies[core].random_instantiate_defines()

    def random_set_all(self):
        """
        Randomise the defines, and the templates too unless fixed.

        :return: self (with templates and defines freshly set)
        """
        if self.fixed_template:
            self.random_set_all_defines()
        else:
            self.random_set_all_templates()
            self.random_set_all_defines()
        return self

    def get_all_templates(self):
        """
        :return: A dict that contains core and its corresponding template
        """
        return {core: self.enemies[core].get_template()
                for core in range(self.enemy_cores)}

    def get_all_defines(self):
        """
        :return: A dict that contains core and its corresponding defines
        """
        return {core: self.enemies[core].get_defines()
                for core in range(self.enemy_cores)}

    def get_file_mapping(self, prefix="", output_folder=""):
        """
        Compile every enemy and map it to a core (cores are numbered from 1).

        :param prefix: The prefix added to the filename
        :param output_folder: The output folder of the enemies
        :return: A dict representing a mapping of enemy files to cores
        """
        enemy_mapping = dict()
        for core in range(self.enemy_cores):
            filename = output_folder + prefix + str(core + 1) + "_enemy"
            self.enemies[core].create_bin(filename)
            enemy_mapping[core + 1] = filename
        return enemy_mapping
class ObjectiveFunction:
"""
Class to evaluate an enemy config
"""
def __init__(self, experiment_info, log, socket_connect=None):
"""
:param experiment_info: An experiment info object
:param log: A data log object
:param socket_connect: Socket if the network | |
[False, True])
def test_numpy_properties(self):
data = np.arange(6).reshape(3, 2)
arr = TensorArray(data)
self.assertEqual(arr.numpy_ndim, data.ndim)
self.assertEqual(arr.numpy_shape, data.shape)
self.assertEqual(arr.numpy_dtype, data.dtype)
    def test_bool_tensor_selection(self):
        """Boolean-mask selection on a TensorArray, directly and via a Series."""
        data = TensorArray([[1, 2], [3, 4], [5, 6]])
        sel = TensorArray([True, False, True])
        expected = np.array([[1, 2], [5, 6]])
        # Test TensorArray.__getitem__ with TensorArray
        result = data[sel]
        npt.assert_array_equal(result, expected)
        # Test Series of TensorDtype selection with numpy array
        s = pd.Series(data)
        result = s[np.asarray(sel)]
        npt.assert_array_equal(result, expected)
        # Test Series of TensorDtype selection with TensorArray
        # Currently fails due to Pandas not recognizing as bool index GH#162
        if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
            with self.assertRaises(Exception):
                result = s[sel]
        else:
            result = s[sel]
            npt.assert_array_equal(result, expected)
    def test_int_tensor_selection(self):
        """Integer (fancy) indexing on a TensorArray, directly and via a Series."""
        data = TensorArray([[1, 2], [3, 4], [5, 6]])
        sel = TensorArray([0, 2])
        expected = np.array([[1, 2], [5, 6]])
        # Test TensorArray.__getitem__ with TensorArray
        result = data[sel]
        npt.assert_array_equal(result, expected)
        # Test Series of TensorDtype selection with numpy array
        s = pd.Series(data)
        result = s[np.asarray(sel)]
        npt.assert_array_equal(result, expected)
        # Test Series of TensorDtype selection with TensorArray
        result = s[sel]
        npt.assert_array_equal(result, expected)
        # Test Series of TensorDtype selection by integer location
        result = s.iloc[sel]
        npt.assert_array_equal(result, expected)
    def test_2d_int_tensor_selection(self):
        """2-D integer indexing: supported on the array, rejected by Series."""
        data = TensorArray([[1, 2], [3, 4], [5, 6]])
        sel = TensorArray([[0, 1], [1, 2]])
        expected = np.array([[[1, 2], [3, 4]],
                             [[3, 4], [5, 6]]])
        # Test TensorArray.__getitem__ with TensorArray
        result = data[sel]
        npt.assert_array_equal(result, expected)
        # Test Series of TensorDtype selection with numpy array
        # Currently fails with: ValueError: Cannot index with multidimensional key
        s = pd.Series(data)
        with self.assertRaises(ValueError):
            result = s[np.asarray(sel)]
        # Test Series of TensorDtype selection with TensorArray
        # Currently fails with: TypeError: 'TensorElement' object is not iterable
        # TODO: not sure if this should be allowed
        s = pd.Series(data)
        with self.assertRaises(Exception):
            result = s[sel]
        # Test Series of TensorDtype selection by integer location
        if LooseVersion(pd.__version__) >= LooseVersion("1.1.0"):
            s = pd.Series(data)
            result = s.iloc[sel]
            npt.assert_array_equal(result, expected)
def test_inferred_type(self):
arr = TensorArray([0, 2])
self.assertEqual(arr.inferred_type, "integer")
arr = TensorArray([True, False, True])
self.assertEqual(arr.inferred_type, "boolean")
    def test_numpy_ufunc(self):
        """np.add dispatches through TensorArray for scalar, ndarray, and
        TensorArray operands (both orders) and always returns a TensorArray."""
        def verify_ufunc_result(result_, expected_):
            # Result must keep the extension type, not decay to ndarray
            self.assertTrue(isinstance(result_, TensorArray))
            npt.assert_array_equal(result_, expected_)
        data = np.arange(10).reshape(5, 2)
        arr = TensorArray(data)
        # ufunc with number
        result = np.add(arr, 1)
        expected = np.add(data, 1)
        verify_ufunc_result(result, expected)
        result = np.add(1, arr)
        verify_ufunc_result(result, expected)
        # ufunc with ndarray
        result = np.add(arr, data)
        expected = np.add(data, data)
        verify_ufunc_result(result, expected)
        result = np.add(data, arr)
        verify_ufunc_result(result, expected)
        # ufunc with another TensorArray
        result = np.add(arr, arr)
        expected = np.add(data, data)
        verify_ufunc_result(result, expected)
class TensorArrayDataFrameTests(unittest.TestCase):
    """Tests for TensorArray columns used inside Pandas DataFrames:
    creation, groupby aggregation, boolean indexing, sorting, and repr
    formatting (the expected repr strings below are exact)."""

    def test_create(self):
        """A TensorArray can back a DataFrame column."""
        x = np.array([[1, 2], [3, 4], [5, 6]])
        s = TensorArray(x)
        df = pd.DataFrame({"i": list(range(len(x))), "tensor": s})
        self.assertEqual(len(df), len(x))

    def test_sum(self):
        """Groupby 'sum' aggregates tensor cells element-wise (1-D and 2-D)."""
        keys = ["<KEY>"]
        values = np.array([[1, 1]] * len(keys))
        df = pd.DataFrame({"key": keys, "value": TensorArray(values)})
        result_df = df.groupby("key").aggregate({"value": "sum"})
        # Check array gets unwrapped from TensorElements
        arr = result_df["value"].array
        self.assertEqual(arr.numpy_dtype, values.dtype)
        npt.assert_array_equal(arr.to_numpy(), [[2, 2], [1, 1], [3, 3]])
        # Check the resulting DataFrame
        self.assertEqual(
            repr(result_df),
            textwrap.dedent(
                """\
                value
                key
                a [2, 2]
                b [1, 1]
                c [3, 3]"""
            ),
        )
        # 2D values
        values2 = np.array([[[1, 1], [1, 1]]] * len(keys))
        df2 = pd.DataFrame({"key": keys, "value": TensorArray(values2)})
        result2_df = df2.groupby("key").aggregate({"value": "sum"})
        # Check array gets unwrapped from TensorElements
        arr2 = result2_df["value"].array
        self.assertEqual(arr2.numpy_dtype, values.dtype)
        npt.assert_array_equal(arr2.to_numpy(),
                               [[[2, 2], [2, 2]], [[1, 1], [1, 1]], [[3, 3], [3, 3]]])
        # Check the resulting DataFrame
        self.assertEqual(
            repr(result2_df),
            textwrap.dedent(
                """\
                value
                key
                a [[2, 2], [2, 2]]
                b [[1, 1], [1, 1]]
                c [[3, 3], [3, 3]]"""
            ),
        )

    def test_bool_indexing_dataframe(self):
        """Boolean list selection on a DataFrame with a TensorArray column."""
        s = TensorArray([[1, 2], [3, 4]])
        df = pd.DataFrame({
            "col1": s
        })
        result = df[[False, False]]
        self.assertTrue(isinstance(result, pd.DataFrame))
        self.assertEqual(len(result), 0)
        result = df[[True, True]]
        self.assertTrue(isinstance(result, pd.DataFrame))
        pd.testing.assert_frame_equal(result, df)
        result = df[[True, False]]
        self.assertTrue(isinstance(result, pd.DataFrame))
        self.assertEqual(len(result), 1)
        expected = df.iloc[[0]]
        pd.testing.assert_frame_equal(result, expected)
        result = df[[False, True]]
        self.assertTrue(isinstance(result, pd.DataFrame))
        self.assertEqual(len(result), 1)
        expected = df.iloc[[1]]
        pd.testing.assert_frame_equal(result, expected)

    def test_bool_indexing_series(self):
        """Boolean list selection on a Series of TensorDtype."""
        s = pd.Series(TensorArray([[1, 2], [3, 4]]))
        result = s[[False, False]]
        self.assertTrue(isinstance(result, pd.Series))
        self.assertEqual(len(result), 0)
        result = s[[True, True]]
        self.assertTrue(isinstance(result, pd.Series))
        pd.testing.assert_series_equal(result, s)
        result = s[[True, False]]
        self.assertTrue(isinstance(result, pd.Series))
        self.assertEqual(len(result), 1)
        expected = s.iloc[[0]]
        pd.testing.assert_series_equal(result, expected)
        result = s[[False, True]]
        self.assertTrue(isinstance(result, pd.Series))
        self.assertEqual(len(result), 1)
        expected = s.iloc[[1]]
        pd.testing.assert_series_equal(result, expected)

    def test_sort(self):
        """sort_values on another column reorders tensor rows and keeps dtype."""
        arr = TensorArray(np.arange(6).reshape(3, 2))
        date_range = pd.date_range('2018-01-01', periods=3, freq='H')
        df = pd.DataFrame({"time": date_range, "tensor": arr})
        df = df.sort_values(by="time", ascending=False)
        self.assertEqual(df["tensor"].array.numpy_dtype, arr.numpy_dtype)
        expected = np.array([[4, 5], [2, 3], [0, 1]])
        npt.assert_array_equal(df["tensor"].array, expected)

    def test_large_display_numeric(self):
        """Truncated repr of a long numeric tensor column."""
        # Test integer, uses IntArrayFormatter
        df = pd.DataFrame({"foo": TensorArray(np.array([[1, 2]] * 100))})
        self.assertEqual(
            repr(df),
            textwrap.dedent(
                """\
                foo
                0 [1, 2]
                1 [1, 2]
                2 [1, 2]
                3 [1, 2]
                4 [1, 2]
                .. ...
                95 [1, 2]
                96 [1, 2]
                97 [1, 2]
                98 [1, 2]
                99 [1, 2]
                [100 rows x 1 columns]"""
            )
        )
        # Test float, uses FloatArrayFormatter
        df = pd.DataFrame({"foo": TensorArray(np.array([[1.1, 2.2]] * 100))})
        self.assertEqual(
            repr(df),
            textwrap.dedent(
                """\
                foo
                0 [1.1, 2.2]
                1 [1.1, 2.2]
                2 [1.1, 2.2]
                3 [1.1, 2.2]
                4 [1.1, 2.2]
                .. ...
                95 [1.1, 2.2]
                96 [1.1, 2.2]
                97 [1.1, 2.2]
                98 [1.1, 2.2]
                99 [1.1, 2.2]
                [100 rows x 1 columns]"""
            )
        )

    def test_numeric_display_3D(self):
        """Repr of 3-D (per-cell 2-D) tensors via the patched formatter."""
        # Verify using patched method
        from pandas.io.formats.format import ExtensionArrayFormatter
        self.assertTrue(
            ExtensionArrayFormatter._patched_by_text_extensions_for_pandas)
        # Test integer format 3D values, uses IntArrayFormatter
        df = pd.DataFrame({"foo": TensorArray([[[1, 1], [2, 2]],
                                               [[3, 3], [4, 4]]])})
        self.assertEqual(
            repr(df),
            textwrap.dedent(
                """\
                foo
                0 [[1, 1], [2, 2]]
                1 [[3, 3], [4, 4]]"""
            )
        )
        # Test floating format 3D values, uses FloatArrayFormatter
        df = pd.DataFrame({"foo": TensorArray([[[1.1, 1.1], [2.2, 2.2]],
                                               [[3.3, 3.3], [4.4, 4.4]]])})
        self.assertEqual(
            repr(df),
            textwrap.dedent(
                """\
                foo
                0 [[1.1, 1.1], [2.2, 2.2]]
                1 [[3.3, 3.3], [4.4, 4.4]]"""
            )
        )

    def test_large_display_string(self):
        """Truncated repr of a long string tensor column."""
        # Verify using patched method
        # Unpatched method doesn't work for Pandas 1.0.x but fixed in later versions
        from pandas.io.formats.format import ExtensionArrayFormatter
        self.assertTrue(
            ExtensionArrayFormatter._patched_by_text_extensions_for_pandas)
        # Uses the GenericArrayFormatter
        df = pd.DataFrame({"foo": TensorArray(np.array([["Hello", "world"]] * 100))})
        self.assertEqual(
            repr(df),
            textwrap.dedent(
                """\
                foo
                0 [ Hello, world]
                1 [ Hello, world]
                2 [ Hello, world]
                3 [ Hello, world]
                4 [ Hello, world]
                .. ...
                95 [ Hello, world]
                96 [ Hello, world]
                97 [ Hello, world]
                98 [ Hello, world]
                99 [ Hello, world]
                [100 rows x 1 columns]"""
            )
        )

    def test_display_time(self):
        """Repr of datetime64 / tz-aware tensors via the patched formatter."""
        # Verify using patched method
        from pandas.io.formats.format import ExtensionArrayFormatter
        self.assertTrue(
            ExtensionArrayFormatter._patched_by_text_extensions_for_pandas)
        # datetime64 2D, Uses Datetime64Formatter
        times = pd.date_range('2018-01-01', periods=5, freq='H').to_numpy()
        times_repeated = np.tile(times, (3, 1))
        times_array = TensorArray(times_repeated)
        df = pd.DataFrame({"t": times_array})
        self.assertEqual(
            repr(df),
            textwrap.dedent(
                """\
                t
                0 [2018-01-01 00:00:00, 2018-01-01 01:00:00, 201...
                1 [2018-01-01 00:00:00, 2018-01-01 01:00:00, 201...
                2 [2018-01-01 00:00:00, 2018-01-01 01:00:00, 201..."""
            )
        )
        # datetime64 3D, Uses Datetime64Formatter
        times = pd.date_range('2018-01-01', periods=4, freq='H').to_numpy()
        times = times.reshape(2, 2)
        times_repeated = np.tile(times, (3, 1, 1))
        times_array = TensorArray(times_repeated)
        df = pd.DataFrame({"t": times_array})
        self.assertEqual(
            repr(df),
            textwrap.dedent(
                """\
                t
                0 [[2018-01-01 00:00:00, 2018-01-01 01:00:00], [...
                1 [[2018-01-01 00:00:00, 2018-01-01 01:00:00], [...
                2 [[2018-01-01 00:00:00, 2018-01-01 01:00:00], [..."""
            )
        )
        # datetime64tz, Uses Datetime64TZFormatter
        import dateutil
        from datetime import datetime
        utc = dateutil.tz.tzutc()
        times = [[datetime(2013, 1, 1, tzinfo=utc), datetime(2014, 2, 2, 2, tzinfo=utc)],
                 [pd.NaT, datetime(2015, 3, 3, tzinfo=utc)]]
        times_array = TensorArray(times)
        df = pd.DataFrame({"t": times_array})
        self.assertEqual(
            repr(df),
            textwrap.dedent(
                """\
                t
                0 [ 2013-01-01 00:00:00+00:00, 2014-02-02 02:00...
                1 [ NaT, 2015-03-03 00:00..."""
            )
        )
class TensorArrayIOTests(unittest.TestCase):
    def test_feather(self):
        """A TensorArray column round-trips through a Feather file."""
        x = np.arange(10).reshape(5, 2)
        s = TensorArray(x)
        df = pd.DataFrame({"i": list(range(len(x))), "tensor": s})
        with tempfile.TemporaryDirectory() as dirpath:
            filename = os.path.join(dirpath, "tensor_array_test.feather")
            df.to_feather(filename)
            df_read = pd.read_feather(filename)
            pd.testing.assert_frame_equal(df, df_read)
    @pytest.mark.skipif(LooseVersion(pa.__version__) < LooseVersion("2.0.0"),
                        reason="Nested Parquet data types only supported in Arrow >= 2.0.0")
    def test_parquet(self):
        """A TensorArray column round-trips through a Parquet file."""
        x = np.arange(10).reshape(5, 2)
        s = TensorArray(x)
        df = pd.DataFrame({"i": list(range(len(x))), "tensor": s})
        with tempfile.TemporaryDirectory() as dirpath:
            filename = os.path.join(dirpath, "tensor_array_test.parquet")
            df.to_parquet(filename)
            df_read = pd.read_parquet(filename)
            pd.testing.assert_frame_equal(df, df_read)
def test_feather_chunked(self):
from pyarrow.feather import write_feather
x = np.arange(10).reshape(5, 2)
s = TensorArray(x)
df1 = pd.DataFrame({"i": list(range(len(s))), "tensor": s})
# Create a Table with 2 chunks
table1 = pa.Table.from_pandas(df1)
df2 = df1.copy()
df2["tensor"] = df2["tensor"] * 10
table2 = pa.Table.from_pandas(df2)
table = pa.concat_tables([table1, table2])
self.assertEqual(table.column("tensor").num_chunks, 2)
# Write table to feather and read back as a DataFrame
with tempfile.TemporaryDirectory() as dirpath:
filename = os.path.join(dirpath, "tensor_array_chunked_test.feather")
write_feather(table, filename)
df_read = pd.read_feather(filename)
df_expected | |
output = StockItem.objects.get(pk=output)
context['output'] = output
context['fully_allocated'] = build.isFullyAllocated(output)
context['allocated_parts'] = build.allocatedParts(output)
context['unallocated_parts'] = build.unallocatedParts(output)
except (ValueError, StockItem.DoesNotExist):
pass
return context
def save(self, build, form, **kwargs):
data = form.cleaned_data
location = data.get('location', None)
output = data.get('output', None)
# Complete the build output
build.completeBuildOutput(
output,
self.request.user,
location=location,
)
def get_data(self):
""" Provide feedback data back to the form """
return {
'success': _('Build output completed')
}
class BuildNotes(UpdateView):
    """ View for editing the 'notes' field of a Build object.
    """

    context_object_name = 'build'
    template_name = 'build/notes.html'
    model = Build

    role_required = 'build.view'

    fields = ['notes']

    def get_success_url(self):
        """Return to this build's notes page after a successful edit."""
        build_pk = self.get_object().id
        return reverse('build-notes', kwargs={'pk': build_pk})

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # The 'edit' GET flag switches the template into editing mode
        context['editing'] = str2bool(self.request.GET.get('edit', ''))
        return context
class BuildDetail(DetailView):
    """ Detail view of a single Build object. """

    model = Build
    template_name = 'build/detail.html'
    context_object_name = 'build'

    role_required = 'build.view'

    def get_context_data(self, **kwargs):
        """Add BOM pricing and build status metadata to the context."""
        # NOTE(review): super(DetailView, self) skips DetailView's own
        # get_context_data in the MRO — preserved as found; confirm intent.
        context = super(DetailView, self).get_context_data(**kwargs)

        build = self.get_object()

        context['bom_price'] = build.part.get_price_info(build.quantity, buy=False)
        context['BuildStatus'] = BuildStatus

        return context
class BuildAllocate(DetailView):
    """ View for allocating parts to a Build """

    model = Build
    context_object_name = 'build'
    template_name = 'build/allocate.html'

    role_required = ['build.change']

    def get_context_data(self, **kwargs):
        """ Provide extra context information for the Build allocation page """
        # NOTE(review): super(DetailView, self) skips DetailView's own
        # get_context_data in the MRO — preserved as found; confirm intent.
        context = super(DetailView, self).get_context_data(**kwargs)

        build = self.get_object()
        part = build.part

        context.update({
            'part': part,
            'bom_items': part.bom_items,
            'BuildStatus': BuildStatus,
            'bom_price': build.part.get_price_info(build.quantity, buy=False),
        })

        # The 'edit' GET flag switches the template into editing mode
        if str2bool(self.request.GET.get('edit', None)):
            context['editing'] = True

        return context
class BuildCreate(AjaxCreateView):
    """ View to create a new Build object """

    model = Build
    context_object_name = 'build'
    form_class = forms.EditBuildForm
    ajax_form_title = _('New Build Order')
    ajax_template_name = 'modal_form.html'

    role_required = 'build.add'

    def get_form(self):
        """Hide the part selector when a part is pre-selected."""
        form = super().get_form()

        if form['part'].value():
            form.fields['part'].widget = HiddenInput()

        return form

    def get_initial(self):
        """ Get initial parameters for Build creation.

        If 'part' is specified in the GET query, initialize the Build with the specified Part
        """
        initials = super(BuildCreate, self).get_initial().copy()

        part_id = self.request.GET.get('part', None)

        if part_id:
            try:
                # User has provided a Part ID
                selected_part = Part.objects.get(pk=part_id)
                initials['part'] = selected_part
                initials['destination'] = selected_part.get_default_location()
            except (ValueError, Part.DoesNotExist):
                pass

        initials['reference'] = Build.getNextBuildNumber()

        initials['parent'] = self.request.GET.get('parent', None)

        # User has provided a SalesOrder ID
        initials['sales_order'] = self.request.GET.get('sales_order', None)

        initials['quantity'] = self.request.GET.get('quantity', 1)

        return initials

    def get_data(self):
        return {
            'success': _('Created new build'),
        }

    def validate(self, build, form, **kwargs):
        """
        Perform extra form validation.

        - If part is trackable, check that either batch or serial numbers are calculated

        By this point form.is_valid() has been executed
        """
        pass
class BuildUpdate(AjaxUpdateView):
    """ View for editing a Build object """

    model = Build
    form_class = forms.EditBuildForm
    context_object_name = 'build'
    ajax_form_title = _('Edit Build Details')
    ajax_template_name = 'modal_form.html'

    role_required = 'build.change'

    def get_form(self):
        """Hide fields that must not be edited in the build's current state."""
        form = super().get_form()

        build = self.get_object()

        # Fields which are included in the form, but hidden
        hidden_fields = [
            'parent',
            'sales_order',
        ]

        if build.is_complete:
            # Fields which cannot be edited once the build has been completed
            hidden_fields.extend([
                'part',
                'quantity',
                'batch',
                'take_from',
                'destination',
            ])

        for field_name in hidden_fields:
            form.fields[field_name].widget = HiddenInput()

        return form

    def get_data(self):
        return {
            'info': _('Edited build'),
        }
class BuildDelete(AjaxDeleteView):
    """ View to delete a build """

    model = Build
    ajax_template_name = 'build/delete_build.html'
    ajax_form_title = _('Delete Build')

    # Deleting a build requires the build 'delete' role permission
    role_required = 'build.delete'
class BuildItemDelete(AjaxDeleteView):
    """ View to 'unallocate' a BuildItem.
    Really we are deleting the BuildItem object from the database.
    """

    model = BuildItem
    ajax_template_name = 'build/delete_build_item.html'
    ajax_form_title = _('Unallocate Stock')
    context_object_name = 'item'

    role_required = 'build.delete'

    def get_data(self):
        """Feedback message shown after the allocation is removed."""
        return {
            'danger': _('Removed parts from build allocation')
        }
class BuildItemCreate(AjaxCreateView):
    """
    View for allocating a StockItem to a build output.
    """

    model = BuildItem
    form_class = forms.EditBuildItemForm
    ajax_template_name = 'build/create_build_item.html'
    ajax_form_title = _('Allocate stock to build output')

    role_required = 'build.add'

    # The output StockItem against which the allocation is being made
    output = None

    # The "part" which is being allocated to the output
    part = None

    # Queryset of stock available for allocation (populated in get_form)
    available_stock = None
def get_context_data(self):
"""
Provide context data to the template which renders the form.
"""
ctx = super().get_context_data()
if self.part:
ctx['part'] = self.part
if self.output:
ctx['output'] = self.output
if self.available_stock:
ctx['stock'] = self.available_stock
else:
ctx['no_stock'] = True
return ctx
def validate(self, build_item, form, **kwargs):
"""
Extra validation steps as required
"""
data = form.cleaned_data
stock_item = data.get('stock_item', None)
quantity = data.get('quantity', None)
if stock_item:
# Stock item must actually be in stock!
if not stock_item.in_stock:
form.add_error('stock_item', _('Item must be currently in stock'))
# Check that there are enough items available
if quantity is not None:
available = stock_item.unallocated_quantity()
if quantity > available:
form.add_error('stock_item', _('Stock item is over-allocated'))
form.add_error('quantity', _('Available') + ': ' + str(normalize(available)))
else:
form.add_error('stock_item', _('Stock item must be selected'))
    def get_form(self):
        """ Create Form for making / editing new Part object

        Resolves self.build / self.part / self.output from the bound form
        values and narrows the form's querysets accordingly.
        """
        form = super(AjaxCreateView, self).get_form()

        self.build = None
        self.part = None
        self.output = None

        # If the Build object is specified, hide the input field.
        # We do not want the users to be able to move a BuildItem to a different build
        build_id = form['build'].value()

        if build_id is not None:
            """
            If the build has been provided, hide the widget to change the build selection.
            Additionally, update the allowable selections for other fields.
            """
            form.fields['build'].widget = HiddenInput()
            # Only outputs still being built for this build are valid targets
            form.fields['install_into'].queryset = StockItem.objects.filter(build=build_id, is_building=True)
            self.build = Build.objects.get(pk=build_id)
        else:
            """
            Build has *not* been selected
            """
            pass

        # If the sub_part is supplied, limit to matching stock items
        part_id = form['part_id'].value()

        if part_id:
            try:
                self.part = Part.objects.get(pk=part_id)
            except (ValueError, Part.DoesNotExist):
                pass

        # If the output stock item is specified, hide the input field
        output_id = form['install_into'].value()

        if output_id is not None:
            try:
                self.output = StockItem.objects.get(pk=output_id)
                form.fields['install_into'].widget = HiddenInput()
            except (ValueError, StockItem.DoesNotExist):
                pass
        else:
            # If the output is not specified, but we know that the part is non-trackable, hide the install_into field
            if self.part and not self.part.trackable:
                form.fields['install_into'].widget = HiddenInput()

        if self.build and self.part:
            available_items = self.build.availableStockItems(self.part, self.output)
            form.fields['stock_item'].queryset = available_items

        self.available_stock = form.fields['stock_item'].queryset.all()

        # If there is only a single stockitem available, select it!
        if len(self.available_stock) == 1:
            form.fields['stock_item'].initial = self.available_stock[0].pk

        return form
def get_initial(self):
    """Provide initial data for the BuildItem. Look for the following in the GET data:

    - build: pk of the Build object
    - part: pk of the Part object which we are assigning
    - install_into: pk of the StockItem object into which the allocated stock will be installed
    - item: pk of a particular StockItem to allocate from
    - quantity: quantity to allocate (capped at the item's unallocated quantity)
    """
    initials = super(AjaxCreateView, self).get_initial().copy()

    build_id = self.get_param('build')
    part_id = self.get_param('part')
    output_id = self.get_param('install_into')

    part = None    # Reference to a Part object
    item = None    # Reference to a StockItem object (source of allocation)
    build = None   # Reference to a Build object
    output = None  # Reference to a StockItem object (installation target)

    if part_id:
        try:
            part = Part.objects.get(pk=part_id)
            initials['part_id'] = part.pk
        except (ValueError, Part.DoesNotExist):
            # Catch ValueError too (malformed pk), consistent with the
            # StockItem lookup below
            pass

    if build_id:
        try:
            build = Build.objects.get(pk=build_id)
            initials['build'] = build
        except (ValueError, Build.DoesNotExist):
            pass

    # If the output has been specified
    if output_id:
        try:
            output = StockItem.objects.get(pk=output_id)
            initials['install_into'] = output
        except (ValueError, StockItem.DoesNotExist):
            pass

    # Work out how much stock is required
    if build and part:
        required_quantity = build.unallocatedQuantity(part, output)
    else:
        required_quantity = None

    quantity = self.request.GET.get('quantity', None)

    if quantity is not None:
        try:
            quantity = float(quantity)
        except ValueError:
            # Ignore a malformed 'quantity' query parameter rather than
            # raising a server error
            quantity = None

    if quantity is None and required_quantity is not None:
        quantity = required_quantity

    item_id = self.get_param('item')

    # If the request specifies a particular StockItem
    if item_id:
        try:
            item = StockItem.objects.get(pk=item_id)
        except (ValueError, StockItem.DoesNotExist):
            pass

    # If a StockItem is not selected, try to auto-select one
    if item is None and part is not None:
        items = StockItem.objects.filter(part=part)
        if items.count() == 1:
            item = items.first()

    # Finally, if a StockItem is selected, ensure the quantity is not too much
    if item is not None:
        if quantity is None:
            quantity = item.unallocated_quantity()
        else:
            quantity = min(quantity, item.unallocated_quantity())

    if quantity is not None:
        initials['quantity'] = quantity

    return initials
class BuildItemEdit(AjaxUpdateView):
""" View to edit a BuildItem object """
model = BuildItem
ajax_template_name = 'build/edit_build_item.html'
form_class = forms.EditBuildItemForm
ajax_form_title = _('Edit Stock Allocation')
role_required = 'build.change'
def get_data(self):
    """Return the success message shown after a BuildItem is updated."""
    return {'info': _('Updated Build Item')}
def get_form(self):
"""
Create form for editing a BuildItem.
- Limit the StockItem options | |
indicatorOfParameter == 215:
return '0to1'
if table2Version == 203 and indicatorOfParameter == 214:
return '0to1'
if table2Version == 203 and indicatorOfParameter == 213:
return 'kg m-2'
if table2Version == 203 and indicatorOfParameter == 212:
return 'kg/m2/h'
if table2Version == 203 and indicatorOfParameter == 211:
return 'No Unit'
if table2Version == 203 and indicatorOfParameter == 210:
return 'N m-2 s'
if table2Version == 203 and indicatorOfParameter == 209:
return 'N m-2 s'
if table2Version == 203 and indicatorOfParameter == 208:
return 'J kg-1'
if table2Version == 203 and indicatorOfParameter == 207:
return 'No Unit'
if table2Version == 203 and indicatorOfParameter == 206:
return 'JustAnumber'
if table2Version == 203 and indicatorOfParameter == 205:
return 'J kg-1'
if table2Version == 203 and indicatorOfParameter == 204:
return 'J kg-1'
if table2Version == 203 and indicatorOfParameter == 203:
return 'kg m-1 s-2'
if table2Version == 203 and indicatorOfParameter == 202:
return 'kg s-2'
if table2Version == 203 and indicatorOfParameter == 201:
return 'kg s-2'
if table2Version == 203 and indicatorOfParameter == 200:
return 'kg m-2'
if table2Version == 203 and indicatorOfParameter == 199:
return 'J kg-1'
if table2Version == 203 and indicatorOfParameter == 198:
return '%'
if table2Version == 203 and indicatorOfParameter == 197:
return 'No Unit'
if table2Version == 203 and indicatorOfParameter == 196:
return 'No Unit'
if table2Version == 203 and indicatorOfParameter == 195:
return 'J kg-1'
if table2Version == 203 and indicatorOfParameter == 194:
return 'm'
if table2Version == 203 and indicatorOfParameter == 193:
return 'kg m-2'
if table2Version == 203 and indicatorOfParameter == 192:
return 'kg m-2'
if table2Version == 203 and indicatorOfParameter == 191:
return 'kg m-2'
if table2Version == 203 and indicatorOfParameter == 190:
return 'kg m-2'
if table2Version == 203 and indicatorOfParameter == 189:
return 'kg m-2'
if table2Version == 203 and indicatorOfParameter == 188:
return 'kg m-2'
if table2Version == 203 and indicatorOfParameter == 187:
return 'kg m-2'
if table2Version == 203 and indicatorOfParameter == 186:
return 'kg m-2'
if table2Version == 203 and indicatorOfParameter == 185:
return 'm'
if table2Version == 203 and indicatorOfParameter == 184:
return 'm'
if table2Version == 203 and indicatorOfParameter == 183:
return 'm'
if table2Version == 203 and indicatorOfParameter == 182:
return 'hPa'
if table2Version == 203 and indicatorOfParameter == 181:
return 'hPa'
if table2Version == 203 and indicatorOfParameter == 180:
return 'hPa'
if table2Version == 203 and indicatorOfParameter == 179:
return 'K'
if table2Version == 203 and indicatorOfParameter == 178:
return 'K'
if table2Version == 203 and indicatorOfParameter == 177:
return 'K'
if table2Version == 203 and indicatorOfParameter == 176:
return 'K'
if table2Version == 203 and indicatorOfParameter == 175:
return 'K'
if table2Version == 203 and indicatorOfParameter == 174:
return 'K'
if table2Version == 203 and indicatorOfParameter == 173:
return 'K'
if table2Version == 203 and indicatorOfParameter == 172:
return 'kg s-2'
if table2Version == 203 and indicatorOfParameter == 171:
return 'kg s-2'
if table2Version == 203 and indicatorOfParameter == 170:
return 'kg s-2'
if table2Version == 203 and indicatorOfParameter == 169:
return 'kg s-2'
if table2Version == 203 and indicatorOfParameter == 168:
return 'kg s-2'
if table2Version == 203 and indicatorOfParameter == 167:
return 'm'
if table2Version == 203 and indicatorOfParameter == 166:
return 'kg/m2/h'
if table2Version == 203 and indicatorOfParameter == 165:
return 'kg/m2/h'
if table2Version == 203 and indicatorOfParameter == 164:
return 'm'
if table2Version == 203 and indicatorOfParameter == 163:
return 'm'
if table2Version == 203 and indicatorOfParameter == 161:
return '0to1'
if table2Version == 203 and indicatorOfParameter == 159:
return '%'
if table2Version == 203 and indicatorOfParameter == 158:
return '%'
if table2Version == 203 and indicatorOfParameter == 157:
return '%'
if table2Version == 203 and indicatorOfParameter == 156:
return '%'
if table2Version == 203 and indicatorOfParameter == 155:
return '%'
if table2Version == 203 and indicatorOfParameter == 154:
return '%'
if table2Version == 203 and indicatorOfParameter == 153:
return '%'
if table2Version == 203 and indicatorOfParameter == 152:
return '%'
if table2Version == 203 and indicatorOfParameter == 151:
return '%'
if table2Version == 203 and indicatorOfParameter == 150:
return 'C'
if table2Version == 203 and indicatorOfParameter == 149:
return 'm s-1'
if table2Version == 203 and indicatorOfParameter == 148:
return 'm2 s-2'
if table2Version == 203 and indicatorOfParameter == 147:
return 'm2 s-2'
if table2Version == 203 and indicatorOfParameter == 146:
return 'kt'
if table2Version == 203 and indicatorOfParameter == 145:
return 'kt'
if table2Version == 203 and indicatorOfParameter == 144:
return '%'
if table2Version == 203 and indicatorOfParameter == 143:
return '%'
if table2Version == 203 and indicatorOfParameter == 142:
return '%'
if table2Version == 203 and indicatorOfParameter == 141:
return '%'
if table2Version == 203 and indicatorOfParameter == 134:
return '%'
if table2Version == 203 and indicatorOfParameter == 133:
return '%'
if table2Version == 203 and indicatorOfParameter == 132:
return '%'
if table2Version == 203 and indicatorOfParameter == 131:
return '%'
if table2Version == 203 and indicatorOfParameter == 130:
return 'kg m-3'
if table2Version == 203 and indicatorOfParameter == 129:
return 'K'
if table2Version == 203 and indicatorOfParameter == 128:
return 'W m-2'
if table2Version == 203 and indicatorOfParameter == 126:
return 'W m-2'
if table2Version == 203 and indicatorOfParameter == 122:
return 'm2 s-2'
if table2Version == 203 and indicatorOfParameter == 121:
return 'm2 s-2'
if table2Version == 203 and indicatorOfParameter == 120:
return 'm2 s-2'
if table2Version == 203 and indicatorOfParameter == 119:
return 'm2 s-2'
if table2Version == 203 and indicatorOfParameter == 118:
return 'm2 s-2'
if table2Version == 203 and indicatorOfParameter == 117:
return 'm2 s-2'
if table2Version == 203 and indicatorOfParameter == 116:
return 'C'
if table2Version == 203 and indicatorOfParameter == 115:
return 'C'
if table2Version == 203 and indicatorOfParameter == 114:
return 'C'
if table2Version == 203 and indicatorOfParameter == 113:
return 'C'
if table2Version == 203 and indicatorOfParameter == 112:
return 'C'
if table2Version == 203 and indicatorOfParameter == 111:
return 'C'
if table2Version == 203 and indicatorOfParameter == 110:
return 'kg/m2/h'
if table2Version == 203 and indicatorOfParameter == 109:
return 'kg m-2 s-1'
if table2Version == 203 and indicatorOfParameter == 108:
return 'kg m-2 s-1'
if table2Version == 203 and indicatorOfParameter == 107:
return 'kg m-2'
if table2Version == 203 and indicatorOfParameter == 106:
return 'kg m-2'
if table2Version == 203 and indicatorOfParameter == 105:
return 'kg kg-1'
if table2Version == 203 and indicatorOfParameter == 104:
return 'kg kg-1'
if table2Version == 203 and indicatorOfParameter == 103:
return 'Code'
if table2Version == 203 and indicatorOfParameter == 102:
return 'Code'
if table2Version == 203 and indicatorOfParameter == 101:
return 'kg m-3'
if table2Version == 203 and indicatorOfParameter == 100:
return '0to1'
if table2Version == 203 and indicatorOfParameter == 99:
return '0to1'
if table2Version == 203 and indicatorOfParameter == 96:
return 'W m-2'
if table2Version == 203 and indicatorOfParameter == 95:
return 'W m-2'
if table2Version == 203 and indicatorOfParameter == 91:
return 'No Unit'
if table2Version == 203 and indicatorOfParameter == 89:
return '%'
if table2Version == 203 and indicatorOfParameter == 80:
return 'No Unit'
if table2Version == 203 and indicatorOfParameter == 79:
return '%'
if table2Version == 203 and indicatorOfParameter == 78:
return 'kg kg-1'
if table2Version == 203 and indicatorOfParameter == 77:
return 'cm'
if table2Version == 203 and indicatorOfParameter == 75:
return 'K'
if table2Version == 203 and indicatorOfParameter == 74:
return 'No Unit'
if table2Version == 203 and indicatorOfParameter == 73:
return 'kg/m2/h'
if table2Version == 203 and indicatorOfParameter == 72:
return 'kg/m2/h'
if table2Version == 203 and indicatorOfParameter == 71:
return 'kg/m2/h'
if table2Version == 203 and indicatorOfParameter == 70:
return 'm'
if table2Version == 203 and indicatorOfParameter == 69:
return 'W m-2'
if table2Version == 203 and indicatorOfParameter == 68:
return 'kg m-2'
if table2Version == 203 and indicatorOfParameter == 63:
return 'm 10-4'
if table2Version == | |
enormous_training
assert training_size == (len(no_training) +
len(tiny_training) +
len(low_training) +
len(medium_training) +
len(high_training) +
len(huge_training))# +
#len(enormous_training))
no_testing = no_cer_set[no_training_size:]
tiny_testing = tiny_cer_set[tiny_training_size:]
low_testing = low_cer_set[low_training_size:]
medium_testing = medium_cer_set[medium_training_size:]
high_testing = high_cer_set[high_training_size:]
huge_testing = huge_cer_set[huge_training_size:]
# enormous_testing = enormous_cer_set[enormous_training_size:]
testing_set = no_testing + tiny_testing + low_testing + medium_testing + high_testing + huge_testing# + enormous_testing
assert corpus_size - training_size == (len(no_testing) +
len(tiny_testing) +
len(low_testing) +
len(medium_testing) +
len(high_testing) +
len(huge_testing))# +
#len(enormous_testing))
#random.shuffle(training_set)
random.shuffle(testing_set)
validation_size = int(len(testing_set)/2)
validation_set = testing_set[:validation_size]
testing_set = testing_set[validation_size:]
no_cer_validation = []
incorrect_cer_validation = []
for a in validation_set:
if a[5] == 0:
no_cer_validation.append(a)
else:
incorrect_cer_validation.append(a)
no_cer_validation_size = 640
no_validation_sample = random.sample(no_cer_validation, no_cer_validation_size)
incorrect_cer_validation.extend(no_validation_sample)
validation_set = incorrect_cer_validation.copy()
incorrect_training = tiny_training + low_training + medium_training + high_training + huge_training
no_cer_training_size = 1950
no_training_sample = random.sample(no_training, no_cer_training_size)
incorrect_training.extend(no_training_sample)
training_set = incorrect_training.copy()
if combine_with_charge1:
#training
incorrect_set_charge1 = tiny_cer_set_charge1 + low_cer_set_charge1 + medium_cer_set_charge1 + high_cer_set_charge1 + huge_cer_set_charge1
no_cer_training_size_charge1 = 3400
no_training_sample_charge1 = random.sample(no_cer_set_charge1, no_cer_training_size_charge1)
incorrect_set_charge1.extend(no_training_sample_charge1)
training_set.extend(incorrect_set_charge1)
#validation
incorrect_set_charge1_validation = tiny_cer_set_charge1_validation + low_cer_set_charge1_validation + medium_cer_set_charge1_validation + high_cer_set_charge1_validation + huge_cer_set_charge1_validation
no_cer_validation_size_charge1 = 700
no_validation_sample_charge1 = random.sample(no_cer_set_charge1_validation, no_cer_validation_size_charge1)
incorrect_set_charge1_validation.extend(no_validation_sample_charge1)
validation_set.extend(incorrect_set_charge1_validation)
#testing
testing_set.extend(testing_charge1_data)
random.shuffle(training_set)
print('\nSize training set: {}'.format(len(training_set)))
print('Size testing set: {}'.format(len(testing_set)))
print('Size validation set: {}'.format(len(validation_set)))
save_alignments_to_sqlite(training_set, path=training_path, append=False)
save_alignments_to_sqlite(testing_set, path=testing_path, append=False)
save_alignments_to_sqlite(validation_set, path=validation_path, append=False)
################################################################################
def _bucket_by_cer(alignments):
    """Partition alignments into CER buckets.

    Each alignment is a tuple whose element at index 5 is its character
    error rate (CER).  Returns a dict mapping bucket name ('no', 'tiny',
    'low', 'medium', 'high', 'huge', 'enormous') to the list of
    alignments falling into that bucket, preserving input order.
    """
    buckets = {name: [] for name in
               ('no', 'tiny', 'low', 'medium', 'high', 'huge', 'enormous')}
    for alignment in alignments:
        cer = alignment[5]
        if cer == 0:
            buckets['no'].append(alignment)
        elif cer < 0.02:
            buckets['tiny'].append(alignment)
        elif cer < 0.04:
            buckets['low'].append(alignment)
        elif cer < 0.06:
            buckets['medium'].append(alignment)
        elif cer < 0.08:
            buckets['high'].append(alignment)
        elif cer < 0.1:
            buckets['huge'].append(alignment)
        else:
            buckets['enormous'].append(alignment)
    return buckets


@click.command()
@click.argument('in-dir', type=click.Path(exists=True))
@click.argument('out-dir', type=click.Path(exists=True))
# NOTE(review): the training proportion is currently hard-coded below
# (training_pages = 5000); this option is disabled because the command
# does not accept a matching parameter.
# @click.option('--training-proportion', default=0.8, help='Training data proportion')
@click.option('--seed', default=49, help='The seed of the random number generator.')
def split_dataset_sliding_window(in_dir, out_dir, seed):
    '''
    Split aligned data (sliding window) into training, validation and testing sets.

    \b
    Arguments:
    in-dir -- Input database
    out-dir -- Path to output databases

    Formerly run_dataset_splitting_sliding_window_charge1.py
    '''
    random.seed(seed)

    # make paths absolute
    in_dir = os.path.abspath(in_dir)
    out_dir = os.path.abspath(out_dir)

    training_path = os.path.join(out_dir, 'training_set_sliding_window.db')
    testing_path = os.path.join(out_dir, 'testing_set_sliding_window.db')
    validation_path = os.path.join(out_dir, 'validation_set_sliding_window.db')

    loaded_data, _, _ = load_alignments_from_sqlite(path=in_dir, size='total')

    # keep only four-word windows (drop three-word lines) that are at
    # most 40 characters long
    data_short_lines = [line for line in loaded_data
                        if len(line[4].split(' ')) == 4 and len(line[4]) <= 40]

    # group lines by page, keyed on the first two columns
    data_dict = defaultdict(list)
    for line in data_short_lines:
        data_dict[line[0] + line[1]].append(line)

    # split whole pages (not lines) into training and testing pages
    training_pages = 5000  # total: 6572
    training_keys = random.sample(list(data_dict), training_pages)

    training_set = []
    for key in training_keys:
        training_set.extend(data_dict[key])

    # set-based membership: avoids O(pages^2) list scans
    training_key_set = set(training_keys)
    testing_set = []
    for key in data_dict.keys():
        if key not in training_key_set:
            testing_set.extend(data_dict[key])

    # bucket by CER and drop the highest-CER ('enormous', >= 0.1) lines
    training_buckets = _bucket_by_cer(training_set)
    training_set_filtered = (training_buckets['no'] +
                             training_buckets['tiny'] +
                             training_buckets['low'] +
                             training_buckets['medium'] +
                             training_buckets['high'] +
                             training_buckets['huge'])
    random.shuffle(training_set_filtered)

    testing_buckets = _bucket_by_cer(testing_set)

    # shuffle each kept bucket (same order as before, to preserve the
    # random stream for a given seed)
    kept = ('no', 'tiny', 'low', 'medium', 'high', 'huge')
    for name in kept:
        random.shuffle(testing_buckets[name])

    # first half of every bucket -> validation, second half -> testing
    validation_set_filtered = []
    testing_set_filtered = []
    for name in kept:
        bucket = testing_buckets[name]
        half = int(len(bucket) / 2)
        validation_set_filtered.extend(bucket[:half])
        testing_set_filtered.extend(bucket[half:])

    random.shuffle(testing_set_filtered)
    random.shuffle(validation_set_filtered)

    save_alignments_to_sqlite(training_set_filtered, path=training_path, append=False)
    save_alignments_to_sqlite(testing_set_filtered, path=testing_path, append=False)
    save_alignments_to_sqlite(validation_set_filtered, path=validation_path, append=False)
################################################################################
@click.command()
@click.argument('in-dir', type=click.Path(exists=True))
@click.argument('out-dir', type=click.Path(exists=True))
<EMAIL>('training-proportion', default=0.8, help='Training data proportion')
@click.option('--seed', default=49, help='')
def split_dataset_sliding_window_2(in_dir, out_dir, seed):
'''
Arguments:
in-dir -- Input database
out-dir -- Path to output databases
NOT TESTED! DO NOT USE! (formerly run_dataset_splitting_sliding_window_charge2.py)
'''
random.seed(49)
input_path = home_dir + '/Qurator/used_data/preproc_data/dta/aligned_corpus_sliding_window_german_charge2_080920.db'
training_path = home_dir + '/Qurator/used_data/preproc_data/dta/training_set_sliding_window_german_biased_charge2_170920.db'
testing_path = home_dir + '/Qurator/used_data/preproc_data/dta/testing_set_sliding_window_german_biased_2charges_170920.db'
validation_path = home_dir + '/Qurator/used_data/preproc_data/dta/validation_set_sliding_window_german_biased_2charges_small_170920.db'
testing_small_path = home_dir + '/Qurator/used_data/preproc_data/dta/testing_set_sliding_window_german_biased_2charges_small_170920.db'
german_data, german_data_as_df, headers = load_alignments_from_sqlite(path=input_path, size='total')
combine_with_charge1 = True
if combine_with_charge1:
#training_charge1_path = home_dir + '/Qurator/used_data/preproc_data/dta/training_set_00-10_sliding_window_german_150620.db'
#training_charge1_data, training_charge1_data_as_df, headers_charge1 = load_alignments_from_sqlite(path=training_charge1_path, size='total')
#incorrect_charge1_data = []
#for line in training_charge1_data:
# if line[5] > 0:
# incorrect_charge1_data.append(line)
testing_charge1_path = home_dir + '/Qurator/used_data/preproc_data/dta/testing_set_00-10_sliding_window_german_150620.db'
testing_charge1_data, testing_charge1_data_as_df, headers_charge1 = load_alignments_from_sqlite(path=testing_charge1_path, size='total')
# remove three word lines
german_data_compact = []
for line in german_data:
if len(line[4].split(' ')) == 4:
german_data_compact.append(line)
# remove long lines
german_data_short_lines = []
for line in german_data_compact:
if len(line[4]) <= 40:
german_data_short_lines.append(line)
# create dict for splitting
german_data_dict = defaultdict(list)
for line in german_data_short_lines:
german_data_dict[line[0]+line[1]].append(line)
# create training set
training_pages = 5000 #total: 5654
training_keys = random.sample(list(german_data_dict), training_pages)
training_set = []
for training_key in training_keys:
page_data = german_data_dict[training_key]
for line in page_data:
training_set.append(line)
correct_lines = 0
for line in training_set:
if line[5] == 0:
correct_lines += 1
incorrect_lines = len(training_set) - correct_lines
# create testing set
testing_keys = [key for key in german_data_dict.keys() if key not in training_keys]
testing_set = []
for testing_key in testing_keys:
page_data = german_data_dict[testing_key]
for line in page_data:
testing_set.append(line)
no_cer_training = 0
tiny_cer_training = 0
low_cer_training = 0
medium_cer_training = 0
high_cer_training = 0
huge_cer_training = 0
enormous_cer_training = 0
no_cer_training_set = []
tiny_cer_training_set = []
low_cer_training_set = []
medium_cer_training_set = []
high_cer_training_set = []
huge_cer_training_set = []
enormous_cer_training_set = []
for a in training_set:
if a[5] == 0:
no_cer_training+=1
no_cer_training_set.append(a)
elif a[5] < 0.02:
tiny_cer_training+=1
tiny_cer_training_set.append(a)
elif a[5] < 0.04:
low_cer_training+=1
low_cer_training_set.append(a)
elif a[5] < 0.06:
medium_cer_training+=1
medium_cer_training_set.append(a)
elif a[5] < 0.08:
high_cer_training+=1
high_cer_training_set.append(a)
elif a[5] < 0.1:
huge_cer_training+=1
huge_cer_training_set.append(a)
else:
enormous_cer_training+=1
enormous_cer_training_set.append(a)
incorrect_training_set_filtered = tiny_cer_training_set + low_cer_training_set + \
medium_cer_training_set + high_cer_training_set + huge_cer_training_set
#combine charge2 training set with charge1 training set
#if combine_with_charge1:
# incorrect_training_set_filtered.extend(incorrect_charge1_data)
ten_percent = 19600
reduced_no_cer_training_set = random.sample(no_cer_training_set, ten_percent)
training_set_final = incorrect_training_set_filtered + reduced_no_cer_training_set
random.shuffle(incorrect_training_set_filtered)
# create test set
no_cer_testing = 0
tiny_cer_testing = 0
low_cer_testing = 0
medium_cer_testing = 0
high_cer_testing = 0
huge_cer_testing = 0
enormous_cer_testing = 0
no_cer_testing_set = []
tiny_cer_testing_set = []
low_cer_testing_set = []
medium_cer_testing_set = []
high_cer_testing_set = []
huge_cer_testing_set = []
enormous_cer_testing_set = []
for a in testing_set:
if a[5] == 0:
no_cer_testing+=1
no_cer_testing_set.append(a)
elif a[5] < 0.02:
tiny_cer_testing+=1
tiny_cer_testing_set.append(a)
elif a[5] < 0.04:
low_cer_testing+=1
low_cer_testing_set.append(a)
elif a[5] < 0.06:
medium_cer_testing+=1
medium_cer_testing_set.append(a)
elif a[5] < 0.08:
high_cer_testing+=1
high_cer_testing_set.append(a)
elif a[5] < 0.1:
huge_cer_testing+=1
huge_cer_testing_set.append(a)
else:
enormous_cer_testing+=1
enormous_cer_testing_set.append(a)
testing_set_filtered = no_cer_testing_set + tiny_cer_testing_set + low_cer_testing_set + \
medium_cer_testing_set + high_cer_testing_set + huge_cer_testing_set
# random.shuffle(testing_set_filtered)
random.shuffle(no_cer_testing_set)
random.shuffle(tiny_cer_testing_set)
random.shuffle(low_cer_testing_set)
random.shuffle(medium_cer_testing_set)
random.shuffle(high_cer_testing_set)
random.shuffle(huge_cer_testing_set)
no_cer_set_validation = no_cer_testing_set[:int(len(no_cer_testing_set)/2)]
tiny_cer_set_validation = tiny_cer_testing_set[:int(len(tiny_cer_testing_set)/2)]
low_cer_set_validation = low_cer_testing_set[:int(len(low_cer_testing_set)/2)]
medium_cer_set_validation = medium_cer_testing_set[:int(len(medium_cer_testing_set)/2)]
high_cer_set_validation = high_cer_testing_set[:int(len(high_cer_testing_set)/2)]
huge_cer_set_validation = huge_cer_testing_set[:int(len(huge_cer_testing_set)/2)]
#validation_size = int(len(testing_set_filtered)/2)
#validation_set_final = testing_set_filtered[:validation_size]
validation_set_incorrect = tiny_cer_set_validation + low_cer_set_validation + medium_cer_set_validation + high_cer_set_validation + huge_cer_set_validation
validation_no_cer_size = 630
validation_no_cer_sample = random.sample(no_cer_set_validation, validation_no_cer_size)
validation_set_incorrect.extend(validation_no_cer_sample)
validation_set_final = validation_set_incorrect.copy()
no_cer_set_testing = no_cer_testing_set[int(len(no_cer_testing_set)/2):]
tiny_cer_set_testing = tiny_cer_testing_set[int(len(tiny_cer_testing_set)/2):]
low_cer_set_testing = low_cer_testing_set[int(len(low_cer_testing_set)/2):]
medium_cer_set_testing = medium_cer_testing_set[int(len(medium_cer_testing_set)/2):]
high_cer_set_testing = high_cer_testing_set[int(len(high_cer_testing_set)/2):]
huge_cer_set_testing = huge_cer_testing_set[int(len(huge_cer_testing_set)/2):]
testing_set_final = no_cer_set_testing + tiny_cer_set_testing + low_cer_set_testing + medium_cer_set_testing + high_cer_set_testing + huge_cer_set_testing
testing_set_incorrect = tiny_cer_set_testing + low_cer_set_testing + medium_cer_set_testing + high_cer_set_testing + huge_cer_set_testing
testing_no_cer_size = 620
testing_no_cer_sample = random.sample(no_cer_set_testing, testing_no_cer_size)
testing_set_incorrect.extend(testing_no_cer_sample)
testing_set_final_small = testing_set_incorrect.copy()
if combine_with_charge1:
no_cer_testing_charge1 = 0
tiny_cer_testing_charge1 = 0
low_cer_testing_charge1 = 0
medium_cer_testing_charge1 = 0
high_cer_testing_charge1 = 0
huge_cer_testing_charge1 = 0
enormous_cer_testing_charge1 = 0
no_cer_testing_set_charge1 = []
tiny_cer_testing_set_charge1 = []
low_cer_testing_set_charge1 = []
medium_cer_testing_set_charge1 = | |
**kwargs):
mypatch = []
for key, value in kwargs.items():
mypatch.append(dict(path='/' + key, value=value, op='replace'))
return cgtsclient(request).icpu.update(cpu_id, mypatch)
def host_memory_list(request, host_id):
    """Return all memory records of the given host as Memory wrappers."""
    return [Memory(mem) for mem in cgtsclient(request).imemory.list(host_id)]
def host_memory_get(request, memory_id):
    """Fetch a single memory record by id; raise ValueError if not found."""
    result = cgtsclient(request).imemory.get(memory_id)
    if result:
        return Memory(result)
    raise ValueError('No match found for memory_id "%s".' % memory_id)
def host_memory_update(request, memory_id, **kwargs):
    """Apply a JSON-patch style 'replace' operation for each keyword argument."""
    patch = [dict(path='/' + field, value=val, op='replace')
             for field, val in kwargs.items()]
    return cgtsclient(request).imemory.update(memory_id, patch)
def host_port_list(request, host_id):
    """Return all ethernet ports of the given host as Port wrappers."""
    return [Port(p) for p in cgtsclient(request).ethernet_port.list(host_id)]
def host_port_get(request, port_id):
    """Fetch a single ethernet port by id; raise ValueError if not found."""
    result = cgtsclient(request).ethernet_port.get(port_id)
    if result:
        return Port(result)
    raise ValueError('No match found for port_id "%s".' % port_id)
def host_port_update(request, port_id, **kwargs):
    """Apply a JSON-patch style 'replace' operation for each keyword argument."""
    patch = [dict(path='/' + field, value=val, op='replace')
             for field, val in kwargs.items()]
    return cgtsclient(request).ethernet_port.update(port_id, patch)
def host_disk_list(request, host_id):
    """Return all disks of the given host as Disk wrappers."""
    return [Disk(d) for d in cgtsclient(request).idisk.list(host_id)]
def host_disk_get(request, disk_id):
    """Fetch a single disk by id; raise ValueError if not found."""
    result = cgtsclient(request).idisk.get(disk_id)
    if result:
        return Disk(result)
    raise ValueError('No match found for disk_id "%s".' % disk_id)
def host_stor_list(request, host_id):
    """Return all storage volumes of the given host as StorageVolume wrappers."""
    return [StorageVolume(vol) for vol in cgtsclient(request).istor.list(host_id)]
def host_stor_get(request, stor_id):
    """Fetch a single storage volume by id; raise ValueError if not found."""
    result = cgtsclient(request).istor.get(stor_id)
    if result:
        return StorageVolume(result)
    raise ValueError('No match found for stor_id "%s".' % stor_id)
def host_stor_create(request, **kwargs):
    """Create a new storage volume and return it wrapped as a StorageVolume."""
    return StorageVolume(cgtsclient(request).istor.create(**kwargs))
def host_stor_delete(request, stor_id):
    """Delete the storage volume with the given id."""
    client = cgtsclient(request)
    return client.istor.delete(stor_id)
def host_stor_update(request, stor_id, **kwargs):
    """Apply 'replace' patches built from kwargs; return the updated volume."""
    patch = [dict(path='/' + field, value=val, op='replace')
             for field, val in kwargs.items()]
    return StorageVolume(cgtsclient(request).istor.update(stor_id, patch))
def host_stor_get_by_function(request, host_id, function=None):
    """List the storage volumes of a host, optionally filtered by function."""
    volumes = cgtsclient(request).istor.list(host_id)
    if function:
        volumes = [vol for vol in volumes if vol.function == function]
    return [StorageVolume(vol) for vol in volumes]
class Interface(base.APIResourceWrapper):
    """Wrapper for Inventory Interfaces"""

    # Attributes exposed from the underlying API resource
    _attrs = ['id', 'uuid', 'ifname', 'ifclass', 'iftype', 'imtu', 'imac',
              'aemode', 'txhashpolicy', 'primary_reselect', 'vlan_id',
              'uses', 'used_by', 'ihost_uuid',
              'ipv4_mode', 'ipv6_mode', 'ipv4_pool', 'ipv6_pool',
              'sriov_numvfs', 'sriov_vf_driver']

    def __init__(self, apiresource):
        """Wrap the API resource; synthesize a display name when ifname is empty."""
        super(Interface, self).__init__(apiresource)
        if not self.ifname:
            # Unnamed interfaces get a placeholder built from the last
            # 8 characters of their UUID, e.g. "(1a2b3c4d)"
            self.ifname = '(' + str(self.uuid)[-8:] + ')'
def host_interface_list(request, host_id):
    """Return all interfaces of the given host as Interface wrappers."""
    return [Interface(iface) for iface in cgtsclient(request).iinterface.list(host_id)]
def host_interface_get(request, interface_id):
    """Fetch a single interface by id; raise ValueError if not found."""
    result = cgtsclient(request).iinterface.get(interface_id)
    if result:
        return Interface(result)
    raise ValueError(
        'No match found for interface_id "%s".' % interface_id)
def host_interface_create(request, **kwargs):
    """Create a new interface and return it wrapped as an Interface."""
    return Interface(cgtsclient(request).iinterface.create(**kwargs))
def host_interface_update(request, interface_id, **kwargs):
    """Apply a JSON-patch style 'replace' operation for each keyword argument."""
    patch = [dict(path='/' + field, value=val, op='replace')
             for field, val in kwargs.items()]
    return cgtsclient(request).iinterface.update(interface_id, patch)
def host_interface_delete(request, interface_id):
    """Delete the interface with the given id."""
    client = cgtsclient(request)
    return client.iinterface.delete(interface_id)
class Network(base.APIResourceWrapper):
    """Wrapper for Inventory Networks"""

    # Attributes exposed from the underlying API resource
    _attrs = ['id', 'uuid', 'type', 'name', 'mtu', 'link_capacity',
              'vlan_id', 'dynamic', 'pool_uuid']

    # NOTE: the previous __init__ only called super().__init__ with the
    # same arguments, so the redundant override was removed; behavior
    # is unchanged.
def network_list(request):
    """Return every inventory network."""
    return [Network(net) for net in cgtsclient(request).network.list()]
def network_get(request, network_uuid):
    """Fetch one network by uuid.

    Raises:
        ValueError: when no network matches network_uuid.
    """
    network = cgtsclient(request).network.get(network_uuid)
    if not network:
        raise ValueError('No match found for network_uuid "%s".'
                         % network_uuid)
    return Network(network)
def network_create(request, **kwargs):
    """Create a network from keyword attributes."""
    created = cgtsclient(request).network.create(**kwargs)
    return Network(created)
def network_delete(request, network_uuid):
    """Delete the network identified by network_uuid."""
    client = cgtsclient(request)
    return client.network.delete(network_uuid)
class InterfaceNetwork(base.APIResourceWrapper):
    """Wrapper exposing an interface-to-platform-network assignment."""

    _attrs = ['forihostid', 'id', 'uuid', 'interface_id',
              'interface_uuid', 'ifname', 'network_id',
              'network_uuid', 'network_name', 'network_type']

    def __init__(self, apiresource):
        # No extra initialization needed beyond attribute extraction.
        super(InterfaceNetwork, self).__init__(apiresource)
def interface_network_list_by_host(request, host_uuid):
    """List interface-network assignments for every interface on a host."""
    client = cgtsclient(request)
    assignments = client.interface_network.list_by_host(host_uuid)
    return [InterfaceNetwork(a) for a in assignments]
def interface_network_list_by_interface(request, interface_uuid):
    """List network assignments attached to one interface."""
    client = cgtsclient(request)
    assignments = client.interface_network.list_by_interface(interface_uuid)
    return [InterfaceNetwork(a) for a in assignments]
def interface_network_get(request, interface_network_uuid):
    """Fetch one interface-network assignment by uuid.

    Raises:
        ValueError: when no assignment matches the uuid.
    """
    assignment = cgtsclient(request).interface_network.get(
        interface_network_uuid)
    if not assignment:
        raise ValueError('No match found for interface_network_uuid "%s".'
                         % interface_network_uuid)
    return InterfaceNetwork(assignment)
def interface_network_assign(request, **kwargs):
    """Assign a platform network to an interface."""
    assignment = cgtsclient(request).interface_network.assign(**kwargs)
    return InterfaceNetwork(assignment)
def interface_network_remove(request, interface_network_uuid):
    """Remove an interface-network assignment by uuid."""
    client = cgtsclient(request)
    return client.interface_network.remove(interface_network_uuid)
class InterfaceDataNetwork(base.APIResourceWrapper):
    """Wrapper exposing an interface-to-data-network assignment."""

    _attrs = ['forihostid', 'id', 'uuid', 'interface_id',
              'interface_uuid', 'ifname', 'datanetwork_id',
              'datanetwork_uuid', 'datanetwork_name', 'network_type']

    def __init__(self, apiresource):
        # No extra initialization needed beyond attribute extraction.
        super(InterfaceDataNetwork, self).__init__(apiresource)
def interface_datanetwork_list_by_host(request, host_uuid):
    """List interface-datanetwork assignments for every interface on a host."""
    client = cgtsclient(request)
    assignments = client.interface_datanetwork.list_by_host(host_uuid)
    return [InterfaceDataNetwork(a) for a in assignments]
def interface_datanetwork_list_by_interface(request, interface_uuid):
    """List data-network assignments attached to one interface."""
    client = cgtsclient(request)
    assignments = client.interface_datanetwork.list_by_interface(
        interface_uuid)
    return [InterfaceDataNetwork(a) for a in assignments]
def interface_datanetwork_get(request, interface_datanetwork_uuid):
    """Fetch one interface-datanetwork assignment by uuid.

    Raises:
        ValueError: when no assignment matches the uuid.
    """
    assignment = cgtsclient(request).interface_datanetwork.get(
        interface_datanetwork_uuid)
    if not assignment:
        raise ValueError('No match found for interface_datanetwork_uuid "%s".'
                         % interface_datanetwork_uuid)
    return InterfaceDataNetwork(assignment)
def interface_datanetwork_assign(request, **kwargs):
    """Assign a data network to an interface.

    Returns:
        InterfaceDataNetwork: the newly created assignment.
    """
    assignment = cgtsclient(request).interface_datanetwork.assign(**kwargs)
    # Bug fix: the result was previously wrapped in InterfaceNetwork,
    # whose _attrs expose network_* fields instead of the datanetwork_*
    # fields callers of this API expect.
    return InterfaceDataNetwork(assignment)
def interface_datanetwork_remove(request, interface_datanetwork_uuid):
    """Remove an interface-datanetwork assignment by uuid."""
    client = cgtsclient(request)
    return client.interface_datanetwork.remove(interface_datanetwork_uuid)
class Address(base.APIResourceWrapper):
    """Wrapper exposing Inventory Address attributes."""

    _attrs = ['uuid', 'interface_uuid', 'address', 'prefix', 'enable_dad']

    def __init__(self, apiresource):
        # No extra initialization needed beyond attribute extraction.
        super(Address, self).__init__(apiresource)
def address_list_by_interface(request, interface_id):
    """Return all addresses configured on the given interface."""
    client = cgtsclient(request)
    return [Address(addr)
            for addr in client.address.list_by_interface(interface_id)]
def address_get(request, address_uuid):
    """Fetch one address by uuid.

    Raises:
        ValueError: when no address matches address_uuid.
    """
    address = cgtsclient(request).address.get(address_uuid)
    if not address:
        raise ValueError('No match found for address uuid "%s".'
                         % address_uuid)
    return Address(address)
def address_create(request, **kwargs):
    """Create an address from keyword attributes."""
    created = cgtsclient(request).address.create(**kwargs)
    return Address(created)
def address_delete(request, address_uuid):
    """Delete the address identified by address_uuid."""
    client = cgtsclient(request)
    return client.address.delete(address_uuid)
class AddressPool(base.APIResourceWrapper):
    """Wrapper exposing Inventory Address Pool attributes."""

    _attrs = ['uuid', 'name', 'network', 'prefix', 'order', 'ranges']

    def __init__(self, apiresource):
        # No extra initialization needed beyond attribute extraction.
        super(AddressPool, self).__init__(apiresource)
def address_pool_list(request):
    """Return every address pool."""
    return [AddressPool(pool)
            for pool in cgtsclient(request).address_pool.list()]
def address_pool_get(request, address_pool_uuid):
    """Fetch one address pool by uuid.

    Raises:
        ValueError: when no pool matches address_pool_uuid.
    """
    pool = cgtsclient(request).address_pool.get(address_pool_uuid)
    if not pool:
        raise ValueError('No match found for address pool uuid "%s".'
                         % address_pool_uuid)
    return AddressPool(pool)
def address_pool_create(request, **kwargs):
    """Create an address pool from keyword attributes."""
    created = cgtsclient(request).address_pool.create(**kwargs)
    return AddressPool(created)
def address_pool_delete(request, address_pool_uuid):
    """Delete the address pool identified by address_pool_uuid."""
    client = cgtsclient(request)
    return client.address_pool.delete(address_pool_uuid)
def address_pool_update(request, address_pool_uuid, **kwargs):
    """Patch address pool attributes via JSON-patch 'replace' operations."""
    patch = [{'path': '/' + field, 'value': new_value, 'op': 'replace'}
             for field, new_value in kwargs.items()]
    return cgtsclient(request).address_pool.update(address_pool_uuid, patch)
class Route(base.APIResourceWrapper):
    """Wrapper exposing Inventory Route attributes."""

    _attrs = ['uuid', 'interface_uuid', 'network',
              'prefix', 'gateway', 'metric']

    def __init__(self, apiresource):
        # No extra initialization needed beyond attribute extraction.
        super(Route, self).__init__(apiresource)
def route_list_by_interface(request, interface_id):
    """Return all routes configured on the given interface."""
    client = cgtsclient(request)
    routes = client.route.list_by_interface(interface_id)
    return [Route(route) for route in routes]
def route_get(request, route_uuid):
    """Fetch one route by uuid.

    Raises:
        ValueError: when no route matches route_uuid.
    """
    route = cgtsclient(request).route.get(route_uuid)
    if not route:
        raise ValueError('No match found for route uuid "%s".'
                         % route_uuid)
    return Route(route)
def route_create(request, **kwargs):
    """Create a route from keyword attributes."""
    created = cgtsclient(request).route.create(**kwargs)
    return Route(created)
def route_delete(request, route_uuid):
    """Delete the route identified by route_uuid."""
    client = cgtsclient(request)
    return client.route.delete(route_uuid)
class Device(base.APIResourceWrapper):
    """Wrapper exposing Inventory PCI Device attributes."""

    _attrs = ['uuid', 'name', 'pciaddr', 'host_uuid',
              'pclass_id', 'pvendor_id', 'pdevice_id',
              'pclass', 'pvendor', 'pdevice',
              'numa_node', 'enabled', 'extra_info',
              'sriov_totalvfs', 'sriov_numvfs', 'sriov_vfs_pci_address']

    def __init__(self, apiresource):
        super(Device, self).__init__(apiresource)
        # Devices without a name get a short uuid-derived label.
        if not self.name:
            self.name = '(%s)' % str(self.uuid)[-8:]
def host_device_list(request, host_id):
    """Return all PCI devices on the given host."""
    return [Device(dev)
            for dev in cgtsclient(request).pci_device.list(host_id)]
def device_list_all(request):
    """Return every PCI device across all hosts."""
    return [Device(dev)
            for dev in cgtsclient(request).pci_device.list_all()]
def host_device_get(request, device_uuid):
    """Fetch one PCI device by uuid.

    Raises:
        ValueError: when no device matches device_uuid.
    """
    device = cgtsclient(request).pci_device.get(device_uuid)
    # Consistency: use the same "guard then return" shape as every other
    # *_get helper in this module (behavior is unchanged).
    if not device:
        raise ValueError('No match found for device "%s".' % device_uuid)
    return Device(device)
def host_device_update(request, device_uuid, **kwargs):
    """Patch PCI device attributes via JSON-patch 'replace' operations."""
    patch = [{'path': '/' + field, 'value': new_value, 'op': 'replace'}
             for field, new_value in kwargs.items()]
    return cgtsclient(request).pci_device.update(device_uuid, patch)
class LldpNeighbour(base.APIResourceWrapper):
    """Wrapper exposing Inventory LLDP Neighbour attributes."""

    _attrs = ['port_uuid', 'port_name', 'port_namedisplay', 'uuid',
              'host_uuid', 'msap', 'chassis_id', 'port_identifier',
              'port_description', 'ttl', 'system_name',
              'system_description', 'system_capabilities',
              'management_address', 'dot1_port_vid', 'dot1_proto_vids',
              'dot1_vlan_names', 'dot1_proto_ids', 'dot1_vid_digest',
              'dot1_management_vid', 'dot1_lag', 'dot3_mac_status',
              'dot3_power_mdi', 'dot3_max_frame']

    def __init__(self, apiresource):
        # No extra initialization needed beyond attribute extraction.
        super(LldpNeighbour, self).__init__(apiresource)

    def get_local_port_display_name(self):
        """Return the best available label for the local port.

        Prefers port_name, then port_namedisplay, and finally a short
        uuid-derived label when neither is set.
        """
        if self.port_name:
            return self.port_name
        if self.port_namedisplay:
            return self.port_namedisplay
        return '(' + str(self.port_uuid)[-8:] + ')'
def host_lldpneighbour_list(request, host_uuid):
    """Return all LLDP neighbours seen by the given host."""
    client = cgtsclient(request)
    return [LldpNeighbour(nbr)
            for nbr in client.lldp_neighbour.list(host_uuid)]
def host_lldpneighbour_get(request, neighbour_uuid):
    """Fetch one LLDP neighbour by uuid.

    Raises:
        ValueError: when no neighbour matches neighbour_uuid.
    """
    neighbour = cgtsclient(request).lldp_neighbour.get(neighbour_uuid)
    if not neighbour:
        raise ValueError('No match found for neighbour id "%s".' %
                         neighbour_uuid)
    return LldpNeighbour(neighbour)
def port_lldpneighbour_list(request, port_uuid):
    """Return all LLDP neighbours seen on the given port."""
    client = cgtsclient(request)
    return [LldpNeighbour(nbr)
            for nbr in client.lldp_neighbour.list_by_port(port_uuid)]
class ServiceParameter(base.APIResourceWrapper):
    """Wrapper exposing a Service Parameter configuration entry."""

    _attrs = ['uuid', 'service', 'section', 'name', 'value']

    def __init__(self, apiresource):
        # No extra initialization needed beyond attribute extraction.
        super(ServiceParameter, self).__init__(apiresource)
def service_parameter_list(request):
    """Return every service parameter."""
    return [ServiceParameter(param)
            for param in cgtsclient(request).service_parameter.list()]
class SDNController(base.APIResourceWrapper):
    """Wrapper exposing SDN Controller configuration attributes."""

    _attrs = ['uuid', 'ip_address', 'port', 'transport', 'state',
              'created_at', 'updated_at']

    def __init__(self, apiresource):
        # No extra initialization needed beyond attribute extraction.
        super(SDNController, self).__init__(apiresource)
def sdn_controller_list(request):
    """Return every configured SDN controller."""
    return [SDNController(ctrl)
            for ctrl in cgtsclient(request).sdn_controller.list()]
def sdn_controller_get(request, uuid):
    """Fetch one SDN controller by uuid.

    Raises:
        ValueError: when no controller matches the uuid.
    """
    controller = cgtsclient(request).sdn_controller.get(uuid)
    if not controller:
        raise ValueError('No match found for SDN controller id "%s".' %
                         uuid)
    return SDNController(controller)
def sdn_controller_create(request, **kwargs):
    """Create an SDN controller from keyword attributes."""
    created = cgtsclient(request).sdn_controller.create(**kwargs)
    return SDNController(created)
def sdn_controller_update(request, uuid, **kwargs):
    """Patch SDN controller attributes via JSON-patch 'replace' operations."""
    patch = [{'path': '/' + field, 'value': new_value, 'op': 'replace'}
             for field, new_value in kwargs.items()]
    return cgtsclient(request).sdn_controller.update(uuid, patch)
def sdn_controller_delete(request, uuid):
    """Delete the SDN controller identified by uuid."""
    client = cgtsclient(request)
    return client.sdn_controller.delete(uuid)
def get_sdn_enabled(request):
    """Return True when SDN is enabled in the platform configuration.

    The SDN enabled flag is present in the capabilities of the system
    table, however capabilities is not exposed as an attribute through
    system_list() or system_get() at this level.  We therefore check
    platform.conf to see if SDN is configured.

    Returns False when the flag is absent or the file cannot be read
    (best-effort semantics, as before).
    """
    try:
        with open(PLATFORM_CONFIGURATION, 'r') as fd:
            for line in fd:
                if 'sdn_enabled' in line:
                    # Expected form: sdn_enabled=yes / sdn_enabled=no
                    key_value = line.strip('\n').split('=', 1)
                    return key_value[1].lower() == 'yes'
        # Fix: the flag not being present previously fell through to an
        # AttributeError on None that the broad except silently absorbed;
        # make the "not configured" result explicit instead.
        return False
    except Exception:
        return False
def get_vswitch_type(request):
    """Return the configured vswitch type, or None when unset/unavailable."""
    try:
        capabilities = system_list(request)[0].to_dict().get('capabilities')
        vswitch_type = capabilities.get('vswitch_type', 'none')
    except Exception:
        # Best effort: any lookup failure is treated as "not configured".
        return None
    return vswitch_type if vswitch_type != 'none' else None
def is_system_mode_simplex(request):
    """Return True when the system is configured in simplex mode."""
    mode = system_list(request)[0].to_dict().get('system_mode')
    return mode == constants.SYSTEM_MODE_SIMPLEX
def get_system_type(request):
    """Return the system_type field of the (single) system record."""
    return system_list(request)[0].to_dict().get('system_type')
def get_ceph_storage_model(request):
    """Return the ceph cluster deployment model.

    Returns:
        The deployment_model of the default ceph cluster, or None when
        ceph is not among the configured storage backends.
    """
    # Check the backend first: previously the cluster was fetched
    # unconditionally, wasting an API round-trip (and risking an error)
    # when ceph is not configured at all.
    backends = get_storage_backend(request)
    if STORAGE_BACKEND_CEPH not in backends:
        return None
    cluster = cluster_get(request, constants.CLUSTER_CEPH_DEFAULT_NAME)
    return cluster.deployment_model
def is_host_with_storage(request, host_id):
    """Return whether the given host provides ceph storage for this model."""
    storage_model = get_ceph_storage_model(request)
    if storage_model == constants.CEPH_AIO_SX_MODEL:
        # Single-host deployment: the host necessarily has storage,
        # no need to query further.
        return True
    host = host_get(request, host_id)
    if storage_model == constants.CEPH_STORAGE_MODEL:
        return host._personality == constants.STORAGE
    if storage_model == constants.CEPH_CONTROLLER_MODEL:
        return host._personality == constants.CONTROLLER
    # Storage model is undefined.
    return False
class DataNetwork(base.APIResourceWrapper):
"""..."""
_attrs = ['id', 'uuid', 'network_type', 'name', 'mtu', 'description',
'multicast_group', | |
self.private_endpoint_status = None
class OriginGroup(Resource):
    """Origin group comprising of origins is used for load balancing to origins when the content cannot be served from CDN.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar system_data: Read only system data.
    :vartype system_data: ~azure.mgmt.cdn.models.SystemData
    :param health_probe_settings: Health probe settings to the origin that is used to determine the
     health of the origin.
    :type health_probe_settings: ~azure.mgmt.cdn.models.HealthProbeParameters
    :param origins: The source of the content being delivered via CDN within given origin group.
    :type origins: list[~azure.mgmt.cdn.models.ResourceReference]
    :param traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: Time in minutes to shift
     the traffic to the endpoint gradually when an unhealthy endpoint comes healthy or a new
     endpoint is added. Default is 10 mins. This property is currently not supported.
    :type traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: int
    :param response_based_origin_error_detection_settings: The JSON object that contains the
     properties to determine origin health using real requests/responses. This property is currently
     not supported.
    :type response_based_origin_error_detection_settings:
     ~azure.mgmt.cdn.models.ResponseBasedOriginErrorDetectionParameters
    :ivar resource_state: Resource status of the origin group. Possible values include: "Creating",
     "Active", "Deleting".
    :vartype resource_state: str or ~azure.mgmt.cdn.models.OriginGroupResourceState
    :ivar provisioning_state: Provisioning status of the origin group.
    :vartype provisioning_state: str
    """
    # Constraints enforced by msrest: 'readonly' keys are server-populated
    # and rejected if set by the client; the restoration time is range
    # checked to 0..50 minutes.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'traffic_restoration_time_to_healed_or_new_endpoints_in_minutes': {'maximum': 50, 'minimum': 0},
        'resource_state': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }
    # Python attribute -> REST JSON path mapping used by msrest
    # (de)serialization; 'properties.*' keys nest under the ARM
    # properties envelope.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'health_probe_settings': {'key': 'properties.healthProbeSettings', 'type': 'HealthProbeParameters'},
        'origins': {'key': 'properties.origins', 'type': '[ResourceReference]'},
        'traffic_restoration_time_to_healed_or_new_endpoints_in_minutes': {'key': 'properties.trafficRestorationTimeToHealedOrNewEndpointsInMinutes', 'type': 'int'},
        'response_based_origin_error_detection_settings': {'key': 'properties.responseBasedOriginErrorDetectionSettings', 'type': 'ResponseBasedOriginErrorDetectionParameters'},
        'resource_state': {'key': 'properties.resourceState', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        health_probe_settings: Optional["HealthProbeParameters"] = None,
        origins: Optional[List["ResourceReference"]] = None,
        traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: Optional[int] = None,
        response_based_origin_error_detection_settings: Optional["ResponseBasedOriginErrorDetectionParameters"] = None,
        **kwargs
    ):
        super(OriginGroup, self).__init__(**kwargs)
        self.health_probe_settings = health_probe_settings
        self.origins = origins
        self.traffic_restoration_time_to_healed_or_new_endpoints_in_minutes = traffic_restoration_time_to_healed_or_new_endpoints_in_minutes
        self.response_based_origin_error_detection_settings = response_based_origin_error_detection_settings
        # Server-populated (readonly) fields; always None on the client side.
        self.resource_state = None
        self.provisioning_state = None
class OriginGroupListResult(msrest.serialization.Model):
    """Result of the request to list origin groups. It contains a list of origin groups objects and a URL link to get the next set of results.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar value: List of CDN origin groups within an endpoint.
    :vartype value: list[~azure.mgmt.cdn.models.OriginGroup]
    :param next_link: URL to get the next set of origin objects if there are any.
    :type next_link: str
    """
    # 'value' is server-populated; msrest rejects client-set values.
    _validation = {
        'value': {'readonly': True},
    }
    # Python attribute -> REST JSON key mapping for msrest (de)serialization.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[OriginGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        next_link: Optional[str] = None,
        **kwargs
    ):
        super(OriginGroupListResult, self).__init__(**kwargs)
        # Server-populated (readonly); always None on the client side.
        self.value = None
        self.next_link = next_link
class OriginGroupOverrideAction(DeliveryRuleAction):
    """Defines the origin group override action for the delivery rule.
    All required parameters must be populated in order to send to Azure.
    :param name: Required. The name of the action for the delivery rule.Constant filled by server.
     Possible values include: "CacheExpiration", "CacheKeyQueryString", "ModifyRequestHeader",
     "ModifyResponseHeader", "UrlRedirect", "UrlRewrite", "UrlSigning", "OriginGroupOverride".
    :type name: str or ~azure.mgmt.cdn.models.DeliveryRuleActionEnum
    :param parameters: Required. Defines the parameters for the action.
    :type parameters: ~azure.mgmt.cdn.models.OriginGroupOverrideActionParameters
    """
    # Both fields must be present for the request to serialize.
    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }
    # Python attribute -> REST JSON key mapping for msrest (de)serialization.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'OriginGroupOverrideActionParameters'},
    }
    def __init__(
        self,
        *,
        parameters: "OriginGroupOverrideActionParameters",
        **kwargs
    ):
        super(OriginGroupOverrideAction, self).__init__(**kwargs)
        # Polymorphic discriminator: fixed action name for this subtype.
        self.name = 'OriginGroupOverride'  # type: str
        self.parameters = parameters
class OriginGroupOverrideActionParameters(msrest.serialization.Model):
    """Defines the parameters for the origin group override action.
    Variables are only populated by the server, and will be ignored when sending a request.
    All required parameters must be populated in order to send to Azure.
    :ivar odata_type: Required. Default value:
     "#Microsoft.Azure.Cdn.Models.DeliveryRuleOriginGroupOverrideActionParameters".
    :vartype odata_type: str
    :param origin_group: Required. defines the OriginGroup that would override the
     DefaultOriginGroup.
    :type origin_group: ~azure.mgmt.cdn.models.ResourceReference
    """
    # odata_type is a constant discriminator; origin_group must be supplied.
    _validation = {
        'odata_type': {'required': True, 'constant': True},
        'origin_group': {'required': True},
    }
    # Python attribute -> REST JSON key mapping; the literal dot in
    # '@odata.type' is escaped so msrest does not treat it as nesting.
    _attribute_map = {
        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
        'origin_group': {'key': 'originGroup', 'type': 'ResourceReference'},
    }
    # Class-level constant serialized on every instance.
    odata_type = "#Microsoft.Azure.Cdn.Models.DeliveryRuleOriginGroupOverrideActionParameters"
    def __init__(
        self,
        *,
        origin_group: "ResourceReference",
        **kwargs
    ):
        super(OriginGroupOverrideActionParameters, self).__init__(**kwargs)
        self.origin_group = origin_group
class OriginGroupUpdatePropertiesParameters(msrest.serialization.Model):
    """The JSON object that contains the properties of the origin group.
    :param health_probe_settings: Health probe settings to the origin that is used to determine the
     health of the origin.
    :type health_probe_settings: ~azure.mgmt.cdn.models.HealthProbeParameters
    :param origins: The source of the content being delivered via CDN within given origin group.
    :type origins: list[~azure.mgmt.cdn.models.ResourceReference]
    :param traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: Time in minutes to shift
     the traffic to the endpoint gradually when an unhealthy endpoint comes healthy or a new
     endpoint is added. Default is 10 mins. This property is currently not supported.
    :type traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: int
    :param response_based_origin_error_detection_settings: The JSON object that contains the
     properties to determine origin health using real requests/responses. This property is currently
     not supported.
    :type response_based_origin_error_detection_settings:
     ~azure.mgmt.cdn.models.ResponseBasedOriginErrorDetectionParameters
    """
    # Restoration time is range checked to 0..50 minutes by msrest.
    _validation = {
        'traffic_restoration_time_to_healed_or_new_endpoints_in_minutes': {'maximum': 50, 'minimum': 0},
    }
    # Python attribute -> REST JSON key mapping; keys are top-level here
    # (no 'properties.' prefix) because this model IS the properties bag.
    _attribute_map = {
        'health_probe_settings': {'key': 'healthProbeSettings', 'type': 'HealthProbeParameters'},
        'origins': {'key': 'origins', 'type': '[ResourceReference]'},
        'traffic_restoration_time_to_healed_or_new_endpoints_in_minutes': {'key': 'trafficRestorationTimeToHealedOrNewEndpointsInMinutes', 'type': 'int'},
        'response_based_origin_error_detection_settings': {'key': 'responseBasedOriginErrorDetectionSettings', 'type': 'ResponseBasedOriginErrorDetectionParameters'},
    }
    def __init__(
        self,
        *,
        health_probe_settings: Optional["HealthProbeParameters"] = None,
        origins: Optional[List["ResourceReference"]] = None,
        traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: Optional[int] = None,
        response_based_origin_error_detection_settings: Optional["ResponseBasedOriginErrorDetectionParameters"] = None,
        **kwargs
    ):
        super(OriginGroupUpdatePropertiesParameters, self).__init__(**kwargs)
        self.health_probe_settings = health_probe_settings
        self.origins = origins
        self.traffic_restoration_time_to_healed_or_new_endpoints_in_minutes = traffic_restoration_time_to_healed_or_new_endpoints_in_minutes
        self.response_based_origin_error_detection_settings = response_based_origin_error_detection_settings
class OriginGroupProperties(OriginGroupUpdatePropertiesParameters):
    """The JSON object that contains the properties of the origin group.
    Variables are only populated by the server, and will be ignored when sending a request.
    :param health_probe_settings: Health probe settings to the origin that is used to determine the
     health of the origin.
    :type health_probe_settings: ~azure.mgmt.cdn.models.HealthProbeParameters
    :param origins: The source of the content being delivered via CDN within given origin group.
    :type origins: list[~azure.mgmt.cdn.models.ResourceReference]
    :param traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: Time in minutes to shift
     the traffic to the endpoint gradually when an unhealthy endpoint comes healthy or a new
     endpoint is added. Default is 10 mins. This property is currently not supported.
    :type traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: int
    :param response_based_origin_error_detection_settings: The JSON object that contains the
     properties to determine origin health using real requests/responses. This property is currently
     not supported.
    :type response_based_origin_error_detection_settings:
     ~azure.mgmt.cdn.models.ResponseBasedOriginErrorDetectionParameters
    :ivar resource_state: Resource status of the origin group. Possible values include: "Creating",
     "Active", "Deleting".
    :vartype resource_state: str or ~azure.mgmt.cdn.models.OriginGroupResourceState
    :ivar provisioning_state: Provisioning status of the origin group.
    :vartype provisioning_state: str
    """
    # Extends the update-properties constraints with two readonly,
    # server-populated status fields.
    _validation = {
        'traffic_restoration_time_to_healed_or_new_endpoints_in_minutes': {'maximum': 50, 'minimum': 0},
        'resource_state': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }
    # Python attribute -> REST JSON key mapping for msrest (de)serialization.
    _attribute_map = {
        'health_probe_settings': {'key': 'healthProbeSettings', 'type': 'HealthProbeParameters'},
        'origins': {'key': 'origins', 'type': '[ResourceReference]'},
        'traffic_restoration_time_to_healed_or_new_endpoints_in_minutes': {'key': 'trafficRestorationTimeToHealedOrNewEndpointsInMinutes', 'type': 'int'},
        'response_based_origin_error_detection_settings': {'key': 'responseBasedOriginErrorDetectionSettings', 'type': 'ResponseBasedOriginErrorDetectionParameters'},
        'resource_state': {'key': 'resourceState', 'type': 'str'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
    }
    def __init__(
        self,
        *,
        health_probe_settings: Optional["HealthProbeParameters"] = None,
        origins: Optional[List["ResourceReference"]] = None,
        traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: Optional[int] = None,
        response_based_origin_error_detection_settings: Optional["ResponseBasedOriginErrorDetectionParameters"] = None,
        **kwargs
    ):
        super(OriginGroupProperties, self).__init__(health_probe_settings=health_probe_settings, origins=origins, traffic_restoration_time_to_healed_or_new_endpoints_in_minutes=traffic_restoration_time_to_healed_or_new_endpoints_in_minutes, response_based_origin_error_detection_settings=response_based_origin_error_detection_settings, **kwargs)
        # Server-populated (readonly) fields; always None on the client side.
        self.resource_state = None
        self.provisioning_state = None
class OriginGroupUpdateParameters(msrest.serialization.Model):
    """Origin group properties needed for origin group creation or update.
    :param health_probe_settings: Health probe settings to the origin that is used to determine the
     health of the origin.
    :type health_probe_settings: ~azure.mgmt.cdn.models.HealthProbeParameters
    :param origins: The source of the content being delivered via CDN within given origin group.
    :type origins: list[~azure.mgmt.cdn.models.ResourceReference]
    :param traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: Time in minutes to shift
     the traffic to the endpoint gradually when an unhealthy endpoint comes healthy or a new
     endpoint is added. Default is 10 mins. This property is currently not supported.
    :type traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: int
    :param response_based_origin_error_detection_settings: The JSON object that contains the
     properties to determine origin health using real requests/responses. This property is currently
     not supported.
    :type response_based_origin_error_detection_settings:
     ~azure.mgmt.cdn.models.ResponseBasedOriginErrorDetectionParameters
    """
    # Restoration time is range checked to 0..50 minutes by msrest.
    _validation = {
        'traffic_restoration_time_to_healed_or_new_endpoints_in_minutes': {'maximum': 50, 'minimum': 0},
    }
    # Python attribute -> REST JSON path mapping; 'properties.*' keys nest
    # under the ARM properties envelope in the request body.
    _attribute_map = {
        'health_probe_settings': {'key': 'properties.healthProbeSettings', 'type': 'HealthProbeParameters'},
        'origins': {'key': 'properties.origins', 'type': '[ResourceReference]'},
        'traffic_restoration_time_to_healed_or_new_endpoints_in_minutes': {'key': 'properties.trafficRestorationTimeToHealedOrNewEndpointsInMinutes', 'type': 'int'},
        'response_based_origin_error_detection_settings': {'key': 'properties.responseBasedOriginErrorDetectionSettings', 'type': 'ResponseBasedOriginErrorDetectionParameters'},
    }
    def __init__(
        self,
        *,
        health_probe_settings: Optional["HealthProbeParameters"] = None,
        origins: Optional[List["ResourceReference"]] = None,
        traffic_restoration_time_to_healed_or_new_endpoints_in_minutes: Optional[int] = None,
        response_based_origin_error_detection_settings: Optional["ResponseBasedOriginErrorDetectionParameters"] = None,
        **kwargs
    ):
        super(OriginGroupUpdateParameters, self).__init__(**kwargs)
        self.health_probe_settings = health_probe_settings
        self.origins = origins
        self.traffic_restoration_time_to_healed_or_new_endpoints_in_minutes = traffic_restoration_time_to_healed_or_new_endpoints_in_minutes
        self.response_based_origin_error_detection_settings = response_based_origin_error_detection_settings
class OriginListResult(msrest.serialization.Model):
"""Result of the request to list origins. It contains a list of origin objects and a URL | |
context.get_admin_context(), router1['id'],
{'subnet_id': sub['subnet']['id']})
self.l3_plugin.add_router_interface(
context.get_admin_context(), router['id'],
{'subnet_id': sub3['subnet']['id']})
self.l3_plugin.add_router_interface(
context.get_admin_context(), router1['id'],
{'subnet_id': sub_route_leak['subnet']['id']})
self.l3_plugin.add_router_interface(
context.get_admin_context(), router['id'],
{'subnet_id': sub_route_leak1['subnet']['id']})
self.driver._add_ip_mapping_details = mock.Mock()
with self.port(subnet=sub_route_leak,
tenant_id=mocked.APIC_TENANT) as p0:
p0 = p0['port']
self._bind_port_to_host(p0['id'], 'h1')
details = self._get_gbp_details(p0['id'], 'h1')
if self.driver.per_tenant_context and vrf_per_router:
self.assertEqual(mocked.APIC_TENANT,
details['l3_policy_id'])
self.assertEqual(self._tenant(vrf=True),
details['vrf_tenant'])
self.assertEqual(self._network_vrf_name(),
details['vrf_name'])
self.assertEqual(['192.168.4.0/24', '192.168.96.0/24',
'192.168.98.0/24'],
details['vrf_subnets'])
else:
if self.driver.per_tenant_context:
self.assertEqual(mocked.APIC_TENANT,
details['l3_policy_id'])
else:
self.assertEqual('%s-shared' % self._tenant(vrf=True),
details['l3_policy_id'])
self.assertEqual(self._tenant(vrf=True),
details['vrf_tenant'])
self.assertEqual(self._network_vrf_name(),
details['vrf_name'])
self.assertEqual(['192.168.0.0/24', '192.168.2.0/24',
'192.168.4.0/24', '192.168.6.0/24',
'192.168.8.0/24', '192.168.96.0/24',
'192.168.98.0/24'],
details['vrf_subnets'])
with self.port(subnet=sub, tenant_id=mocked.APIC_TENANT) as p1:
p1 = p1['port']
self._bind_port_to_host(p1['id'], 'h1')
details = self._get_gbp_details(p1['id'], 'h1')
if self.driver.per_tenant_context and vrf_per_router:
self.assertEqual('router:%s' % router['id'],
details['l3_policy_id'])
self.assertEqual(self._tenant(vrf=True),
details['vrf_tenant'])
self.assertEqual(
self._routed_network_vrf_name(router=router['id']),
details['vrf_name'])
self.assertEqual(['192.168.0.0/24', '192.168.2.0/24',
'192.168.6.0/24', '192.168.8.0/24',
'192.168.96.0/24', '192.168.98.0/24'],
details['vrf_subnets'])
else:
if self.driver.per_tenant_context:
self.assertEqual(mocked.APIC_TENANT,
details['l3_policy_id'])
else:
self.assertEqual('%s-shared' % self._tenant(vrf=True),
details['l3_policy_id'])
self.assertEqual(self._tenant(vrf=True),
details['vrf_tenant'])
self.assertEqual(self._network_vrf_name(),
details['vrf_name'])
self.assertEqual(['192.168.0.0/24', '192.168.2.0/24',
'192.168.4.0/24', '192.168.6.0/24',
'192.168.8.0/24', '192.168.96.0/24',
'192.168.98.0/24'],
details['vrf_subnets'])
# remove the router interface
self.l3_plugin.remove_router_interface(
context.get_admin_context(), router['id'],
{'subnet_id': sub3['subnet']['id']})
details = self._get_gbp_details(p1['id'], 'h1')
if self.driver.per_tenant_context and vrf_per_router:
self.assertEqual(['192.168.0.0/24', '192.168.2.0/24',
'192.168.96.0/24', '192.168.98.0/24'],
details['vrf_subnets'])
else:
self.assertEqual(['192.168.0.0/24', '192.168.2.0/24',
'192.168.4.0/24', '192.168.6.0/24',
'192.168.8.0/24', '192.168.96.0/24',
'192.168.98.0/24'],
details['vrf_subnets'])
with self.port(subnet=sub2, tenant_id=mocked.APIC_TENANT) as p2:
p2 = p2['port']
self._bind_port_to_host(p2['id'], 'h1')
details = self._get_gbp_details(p2['id'], 'h1')
if self.driver.per_tenant_context:
self.assertEqual(mocked.APIC_TENANT,
details['l3_policy_id'])
else:
self.assertEqual('%s-shared' % self._tenant(vrf=True),
details['l3_policy_id'])
self.assertEqual(self._tenant(vrf=True),
details['vrf_tenant'])
self.assertEqual(self._network_vrf_name(),
details['vrf_name'])
if self.driver.per_tenant_context and vrf_per_router:
self.assertEqual(['192.168.4.0/24', '192.168.6.0/24',
'192.168.8.0/24', '192.168.96.0/24',
'192.168.98.0/24'],
details['vrf_subnets'])
else:
self.assertEqual(['192.168.0.0/24', '192.168.2.0/24',
'192.168.4.0/24', '192.168.6.0/24',
'192.168.8.0/24', '192.168.96.0/24',
'192.168.98.0/24'],
details['vrf_subnets'])
    def test_vrf_details(self):
        # Exercise the common VRF details scenario with per-router VRFs
        # disabled (the default mode).
        self._test_vrf_details()
    def test_vrf_details_vrf_per_router(self):
        # Opt the test tenant into per-router VRFs, then re-run the common
        # VRF details scenario asserting the per-router expectations.
        self.driver.vrf_per_router_tenants.append(mocked.APIC_TENANT)
        self._test_vrf_details(vrf_per_router=True)
    def test_add_router_interface_on_shared_net_by_port(self):
        # A network shared by 'onetenant' is attached (by port) to a router
        # created by a different tenant; the APIC call must be scoped to the
        # network owner's tenant and app profile, not the caller's.
        net = self.create_network(
            tenant_id='onetenant', expected_res_status=201, shared=True,
            is_admin_context=True)['network']
        sub = self.create_subnet(
            network_id=net['id'], cidr='192.168.0.0/24',
            ip_version=4, is_admin_context=True)
        router = self.create_router(api=self.ext_api,
                                    expected_res_status=201)['router']
        # The port belongs to yet another tenant than the network owner.
        with self.port(subnet=sub, tenant_id='anothertenant') as p1:
            self.mgr.add_router_interface = mock.Mock()
            self.l3_plugin.add_router_interface(
                context.get_admin_context(), router['id'],
                {'port_id': p1['port']['id']})
            # Tenant/app-profile arguments are derived from the network
            # owner ('onetenant'); the router name is scoped to TEST_TENANT.
            self.mgr.add_router_interface.assert_called_once_with(
                self._tenant(neutron_tenant='onetenant'),
                self._scoped_name(
                    router['id'], tenant=TEST_TENANT), net['id'],
                app_profile_name=self._app_profile(neutron_tenant='onetenant'))
            self.mgr.remove_router_interface = mock.Mock()
            # Test removal
            self.l3_plugin.remove_router_interface(
                context.get_admin_context(), router['id'],
                {'port_id': p1['port']['id']})
            self.mgr.remove_router_interface.assert_called_once_with(
                self._tenant(neutron_tenant='onetenant'),
                self._scoped_name(router['id'], tenant=TEST_TENANT), net['id'],
                app_profile_name=self._app_profile(neutron_tenant='onetenant'))
    def test_add_router_interface_on_shared_net_by_subnet(self):
        """Add/remove a router interface by subnet id on a shared network.

        Same expectation as the by-port variant: APIC calls use the
        network owner's tenant even though the subnet was created by
        'anothertenant'.
        """
        net = self.create_network(
            tenant_id='onetenant', expected_res_status=201, shared=True,
            is_admin_context=True)['network']
        sub = self.create_subnet(
            network_id=net['id'], cidr='192.168.0.0/24',
            ip_version=4, is_admin_context=True,
            tenant_id='anothertenant')['subnet']
        router = self.create_router(api=self.ext_api,
                                    expected_res_status=201)['router']
        self.mgr.add_router_interface = mock.Mock()
        self.l3_plugin.add_router_interface(
            context.get_admin_context(), router['id'],
            {'subnet_id': sub['id']})
        self.mgr.add_router_interface.assert_called_once_with(
            self._tenant(neutron_tenant='onetenant'),
            self._scoped_name(router['id'], tenant=TEST_TENANT), net['id'],
            app_profile_name=self._app_profile(neutron_tenant='onetenant'))
        self.mgr.remove_router_interface = mock.Mock()
        # Test removal
        self.l3_plugin.remove_router_interface(
            context.get_admin_context(), router['id'],
            {'subnet_id': sub['id']})
        self.mgr.remove_router_interface.assert_called_once_with(
            self._tenant(neutron_tenant='onetenant'),
            self._scoped_name(router['id'], tenant=TEST_TENANT), net['id'],
            app_profile_name=self._app_profile(neutron_tenant='onetenant'))
    def test_sync_on_demand(self):
        """Creating the special sync network as admin triggers a base sync."""
        self.synchronizer.reset_mock()
        self.create_network(name=acst.APIC_SYNC_NETWORK, is_admin_context=True)
        self.assertTrue(self.synchronizer._sync_base.called)
    def test_sync_on_demand_no_admin(self):
        """The sync network name does not trigger a sync without admin context."""
        self.synchronizer.reset_mock()
        self.create_network(name=acst.APIC_SYNC_NETWORK)
        self.assertFalse(self.synchronizer._sync_base.called)
    def test_sync_on_demand_not(self):
        """An ordinary network name never triggers an on-demand sync."""
        self.synchronizer.reset_mock()
        self.create_network(name='some_name', is_admin_context=True,
                            expected_res_status=201)
        self.assertFalse(self.synchronizer._sync_base.called)
    def test_attestation(self):
        """GBP details for a bound port carry a signed attestation blob.

        The attestation is a base64-encoded JSON document naming the
        switch port and the endpoint group, HMAC-SHA256 signed with the
        configured secret.
        """
        self._register_agent('h1')
        net = self.create_network(
            tenant_id='onetenant', expected_res_status=201)['network']
        expected_attestation = {'ports': [{'switch': '102',
                                           'port': 'eth4/23'}],
                                'policy-space-name': self._tenant(
                                    neutron_tenant='onetenant'),
                                'endpoint-group-name': (
                                    self._app_profile(
                                        neutron_tenant='onetenant') + '|' +
                                    net['id'])}
        sub = self.create_subnet(
            tenant_id='onetenant', network_id=net['id'], cidr='192.168.0.0/24',
            ip_version=4)
        self.driver.apic_manager.get_switch_and_port_for_host = mock.Mock(
            return_value=[('102', 'eth4/23')])
        with self.port(subnet=sub, tenant_id='onetenant') as p1:
            p1 = p1['port']
            self._bind_port_to_host(p1['id'], 'h1')
            self.driver._add_ip_mapping_details = mock.Mock()
            # Mock switch, module and port for host
            details = self._get_gbp_details(p1['id'], 'h1')
            # Test attestation exists
            self.assertTrue('attestation' in details)
            self.assertEqual(1, len(details['attestation']))
            observed_attestation = base64.b64decode(
                details['attestation'][0]['validator'])
            # It's a json string
            observed_attestation_copy = observed_attestation
            # Unmarshal
            observed_attestation = json.loads(observed_attestation)
            # timestamp/validity vary per run; drop them before comparing.
            del observed_attestation['timestamp']
            del observed_attestation['validity']
            self.assertEqual(expected_attestation, observed_attestation)
            self.assertEqual(details['attestation'][0]['name'], p1['id'])
            # Validate decrypting
            observed_mac = base64.b64decode(
                details['attestation'][0]['validator-mac'])
            # NOTE(review): passing a str key/msg to hmac.new assumes
            # Python 2 — on Python 3 hmac requires bytes; confirm before
            # porting.
            expected_mac = hmac.new(
                'dirtylittlesecret', msg=observed_attestation_copy,
                digestmod=hashlib.sha256).digest()
            # Validation succeeded
            self.assertEqual(expected_mac, observed_mac)
    def test_dhcp_notifications_on_create(self):
        """Creating a DHCP port notifies every bound port on its subnet."""
        self._register_agent('h1')
        net = self.create_network(
            expected_res_status=201, shared=True,
            is_admin_context=True)['network']
        sub = self.create_subnet(
            network_id=net['id'], cidr='192.168.0.0/24',
            ip_version=4, is_admin_context=True)
        with self.port(subnet=sub) as p1:
            self._bind_port_to_host(p1['port']['id'], 'h1')
            with self.port(subnet=sub) as p2:
                self._bind_port_to_host(p2['port']['id'], 'h1')
                self.driver.notifier.reset_mock()
                with self.port(subnet=sub, device_owner="network:dhcp"):
                    # Both pre-existing bound ports get a port_update.
                    self.assertEqual(
                        2, self.driver.notifier.port_update.call_count)
                    p1 = self.show_port(p1['port']['id'],
                                        is_admin_context=True)['port']
                    p2 = self.show_port(p2['port']['id'],
                                        is_admin_context=True)['port']
                    expected_calls = [
                        mock.call(mock.ANY, p1),
                        mock.call(mock.ANY, p2)]
                    self._check_call_list(
                        expected_calls,
                        self.driver.notifier.port_update.call_args_list)
    def test_dhcp_notifications_on_update(self):
        """Moving a DHCP port between subnets notifies the affected ports.

        Creating a DHCP port on sub notifies only sub's ports; switching
        it to sub2 notifies ports on both the old and the new subnet.
        """
        self._register_agent('h1')
        net = self.create_network(
            expected_res_status=201, shared=True,
            is_admin_context=True)['network']
        sub = self.create_subnet(
            network_id=net['id'], cidr='192.168.0.0/24',
            ip_version=4, is_admin_context=True)
        sub2 = self.create_subnet(
            network_id=net['id'], cidr='192.168.1.0/24',
            ip_version=4, is_admin_context=True)
        with self.port(subnet=sub) as p1:
            # Force port on a specific subnet
            self.update_port(
                p1['port']['id'],
                fixed_ips=[{'subnet_id': sub['subnet']['id']}],
                is_admin_context=True)
            self._bind_port_to_host(p1['port']['id'], 'h1')
            with self.port(subnet=sub2) as p2:
                # Force port on a specific subnet
                self.update_port(
                    p2['port']['id'],
                    fixed_ips=[{'subnet_id': sub2['subnet']['id']}],
                    is_admin_context=True)
                self._bind_port_to_host(p2['port']['id'], 'h1')
                self.driver.notifier.port_update.reset_mock()
                with self.port(subnet=sub, device_owner="network:dhcp") as p3:
                    # Only sub 1 notifies
                    self.assertEqual(
                        1, self.driver.notifier.port_update.call_count)
                    # Force port on a specific subnet
                    self.update_port(
                        p3['port']['id'],
                        fixed_ips=[{'subnet_id': sub['subnet']['id']}],
                        is_admin_context=True)
                    self.driver.notifier.port_update.reset_mock()
                    # Switch DHCP port to sub2
                    self.update_port(
                        p3['port']['id'],
                        fixed_ips=[{'subnet_id': sub2['subnet']['id']}],
                        is_admin_context=True)
                    # Old and new subnet ports are both notified.
                    self.assertEqual(
                        2, self.driver.notifier.port_update.call_count)
                    p1 = self.show_port(p1['port']['id'],
                                        is_admin_context=True)['port']
                    p2 = self.show_port(p2['port']['id'],
                                        is_admin_context=True)['port']
                    expected_calls = [
                        mock.call(mock.ANY, p1),
                        mock.call(mock.ANY, p2)]
                    self._check_call_list(
                        expected_calls,
                        self.driver.notifier.port_update.call_args_list)
    def test_overlapping_ip_ownership(self):
        """HA IP ownership is tracked per port, so the same IP address on
        two different networks can be owned by two ports at once.
        """
        ha_handler = ha.HAIPOwnerDbMixin()
        net1 = self.create_network(
            tenant_id='onetenant', expected_res_status=201, shared=True,
            is_admin_context=True)['network']
        sub1 = self.create_subnet(
            network_id=net1['id'], cidr='192.168.0.0/24',
            ip_version=4, is_admin_context=True)
        # Create another network with the same subnet
        net2 = self.create_network(
            tenant_id='onetenant', expected_res_status=201, shared=True,
            is_admin_context=True)['network']
        sub2 = self.create_subnet(
            network_id=net2['id'], cidr='192.168.0.0/24',
            ip_version=4, is_admin_context=True)
        # Create 2 ports in each subnet, with the same IP address
        with self.port(subnet=sub1, fixed_ips=[{'ip_address':
                                                '192.168.0.4'}]) as p1:
            with self.port(subnet=sub2, fixed_ips=[{'ip_address':
                                                    '192.168.0.4'}]) as p2:
                p1 = p1['port']
                p2 = p2['port']
                # Verify the two IPs are the same
                self.assertEqual([x['ip_address'] for x in p1['fixed_ips']],
                                 [x['ip_address'] for x in p2['fixed_ips']])
                # Set P1 as owner
                ha_handler.update_ip_owner(
                    {'port': p1['id'], 'ip_address_v4': '192.168.0.4'})
                # Ownership is set in the DB for P1
                own_p1 = ha_handler.ha_ip_handler.get_ha_ipaddresses_for_port(
                    p1['id'])
                self.assertEqual(['192.168.0.4'], own_p1)
                # Set P2 as owner
                ha_handler.update_ip_owner(
                    {'port': p2['id'], 'ip_address_v4': '192.168.0.4'})
                # Ownership is set in the DB for P2
                own_p2 = ha_handler.ha_ip_handler.get_ha_ipaddresses_for_port(
                    p2['id'])
                self.assertEqual(['192.168.0.4'], own_p2)
                # P1 is still there
                own_p1 = ha_handler.ha_ip_handler.get_ha_ipaddresses_for_port(
                    p1['id'])
                self.assertEqual(['192.168.0.4'], own_p1)
                # Verify number of entries is exactly 2
                session = db_api.get_session()
                entries = session.query(
                    ha.HAIPAddressToPortAssocation).all()
                self.assertEqual(2, len(entries))
    def test_ip_address_owner_update(self):
        """ip_address_owner_update records the owning port and notifies it.

        When ownership of an IP moves from p1 to p2, both the old and the
        new owner ports are notified.
        """
        net = self.create_network(
            tenant_id=mocked.APIC_TENANT, expected_res_status=201)['network']
        self.create_subnet(
            tenant_id=mocked.APIC_TENANT,
            network_id=net['id'], cidr='10.0.0.0/24', ip_version=4)['subnet']
        p1 = self.create_port(
            network_id=net['id'], tenant_id=mocked.APIC_TENANT,
            device_owner='compute:', device_id='someid')['port']
        p2 = self.create_port(
            network_id=net['id'], tenant_id=mocked.APIC_TENANT,
            device_owner='compute:', device_id='someid')['port']
        ip_owner_info = {'port': p1['id'], 'ip_address_v4': '1.2.3.4'}
        self.driver.notify_port_update = mock.Mock()
        # set new owner
        self.driver.ip_address_owner_update(
            context.get_admin_context(),
            ip_owner_info=ip_owner_info, host='h1')
        obj = self.driver.ha_ip_handler.get_port_for_ha_ipaddress(
            '1.2.3.4', net['id'])
        self.assertEqual(p1['id'], obj['port_id'])
        self.driver.notify_port_update.assert_called_with(p1['id'])
        # update existing owner
        self.driver.notify_port_update.reset_mock()
        ip_owner_info['port'] = p2['id']
        self.driver.ip_address_owner_update(
            context.get_admin_context(),
            ip_owner_info=ip_owner_info, host='h2')
        obj = self.driver.ha_ip_handler.get_port_for_ha_ipaddress(
            '1.2.3.4', net['id'])
        self.assertEqual(p2['id'], obj['port_id'])
        # Both the previous and the new owner got a port_update.
        exp_calls = [
            mock.call(p1['id']),
            mock.call(p2['id'])]
        self._check_call_list(
            exp_calls, self.driver.notify_port_update.call_args_list)
    def test_gbp_details_for_allowed_address_pair(self):
        """Allowed-address-pair handling in GBP details.

        Two ports share the same allowed-address pairs; each port reports
        the pair entry it actively owns. Floating IPs attached to ports
        that hold the shared address as a fixed IP are reported ("stolen")
        by the active owner ports, and FIP updates fan out to them.
        """
        self._register_agent('h1')
        self._register_agent('h2')
        net = self.create_network(
            tenant_id=mocked.APIC_TENANT, expected_res_status=201)['network']
        sub1 = self.create_subnet(
            tenant_id=mocked.APIC_TENANT,
            network_id=net['id'], cidr='10.0.0.0/24', ip_version=4)['subnet']
        sub2 = self.create_subnet(
            tenant_id=mocked.APIC_TENANT,
            network_id=net['id'], cidr='1.2.3.0/24', ip_version=4)['subnet']
        allow_addr = [{'ip_address': '192.168.3.11',
                       'mac_address': '00:00:00:AA:AA:AA'},
                      {'ip_address': '192.168.3.11',
                       'mac_address': '00:00:00:BB:BB:BB'}]
        # create 2 ports with same allowed-addresses
        p1 = self.create_port(
            network_id=net['id'], tenant_id=mocked.APIC_TENANT,
            device_owner='compute:', device_id='someid',
            fixed_ips=[{'subnet_id': sub1['id']}],
            allowed_address_pairs=allow_addr)['port']
        p2 = self.create_port(
            network_id=net['id'], tenant_id=mocked.APIC_TENANT,
            device_owner='compute:', device_id='someid',
            fixed_ips=[{'subnet_id': sub1['id']}],
            allowed_address_pairs=allow_addr)['port']
        self._bind_port_to_host(p1['id'], 'h1')
        self._bind_port_to_host(p2['id'], 'h2')
        self.driver.ha_ip_handler.set_port_id_for_ha_ipaddress(
            p1['id'], '1.2.3.250')
        self.driver.ha_ip_handler.set_port_id_for_ha_ipaddress(
            p2['id'], '1.2.3.251')
        # Each port's details mark its own pair entry as active.
        allow_addr[0]['active'] = True
        details = self._get_gbp_details(p1['id'], 'h1')
        self.assertEqual(allow_addr, details['allowed_address_pairs'])
        del allow_addr[0]['active']
        allow_addr[1]['active'] = True
        details = self._get_gbp_details(p2['id'], 'h2')
        self.assertEqual(allow_addr, details['allowed_address_pairs'])
        # set allowed-address as fixed-IP of ports p3 and p4, which also have
        # floating-IPs. Verify that FIP is "stolen" by p1 and p2
        net_ext = self.create_network(
            is_admin_context=True, tenant_id=mocked.APIC_TENANT,
            **{'router:external': 'True'})['network']
        self.create_subnet(
            is_admin_context=True, tenant_id=mocked.APIC_TENANT,
            network_id=net_ext['id'], cidr='8.8.8.0/24',
            ip_version=4)['subnet']
        p3 = self.create_port(
            network_id=net['id'], tenant_id=mocked.APIC_TENANT,
            fixed_ips=[{'subnet_id': sub2['id'],
                        'ip_address': '192.168.3.11'}])['port']
        p4 = self.create_port(
            network_id=net['id'], tenant_id=mocked.APIC_TENANT,
            fixed_ips=[{'subnet_id': sub2['id'],
                        'ip_address': '192.168.3.11'}])['port']
        rtr = self.create_router(
            api=self.ext_api, tenant_id=mocked.APIC_TENANT,
            external_gateway_info={'network_id': net_ext['id']})['router']
        self.l3_plugin.add_router_interface(
            context.get_admin_context(), rtr['id'], {'subnet_id': sub2['id']})
        fip1 = self.create_floatingip(
            tenant_id=mocked.APIC_TENANT, port_id=p3['id'],
            floating_network_id=net_ext['id'],
            api=self.ext_api)['floatingip']
        fip2 = self.create_floatingip(
            tenant_id=mocked.APIC_TENANT, port_id=p4['id'],
            floating_network_id=net_ext['id'],
            api=self.ext_api)['floatingip']
        details = self._get_gbp_details(p1['id'], 'h1')
        self.assertEqual(1, len(details['floating_ip']))
        self.assertEqual(
            fip1['floating_ip_address'],
            details['floating_ip'][0]['floating_ip_address'])
        details = self._get_gbp_details(p2['id'], 'h2')
        self.assertEqual(1, len(details['floating_ip']))
        self.assertEqual(
            fip2['floating_ip_address'],
            details['floating_ip'][0]['floating_ip_address'])
        # verify FIP updates: update to p3, p4 should also update p1 and p2
        self.driver.notify_port_update = mock.Mock()
        self.driver.notify_port_update_for_fip(p3['id'])
        expected_calls = [
            mock.call(p, mock.ANY)
            for p in sorted([p1['id'], p2['id'], p3['id']])]
        self._check_call_list(
            expected_calls, self.driver.notify_port_update.call_args_list)
        self.driver.notify_port_update.reset_mock()
        self.driver.notify_port_update_for_fip(p4['id'])
        expected_calls = [
            mock.call(p, mock.ANY)
            for p in sorted([p1['id'], p2['id'], p4['id']])]
        self._check_call_list(
            expected_calls, self.driver.notify_port_update.call_args_list)
    def test_gbp_details_for_route_leak_network(self):
        """A route-leak network reports per-router 'Leak-...' NAT EPGs.

        Three routers are attached, but rtr3 reuses rtr1's routing
        context (apic:use_routing_context), so only rtr1 and rtr2 each
        yield a floating-IP entry mapping the port's fixed IP to its
        Leak-<router>-<network> EPG.
        """
        self.driver.per_tenant_context = True
        self.driver.vrf_per_router_tenants.append(mocked.APIC_TENANT)
        self._register_agent('h1')
        net_route_leak = self.create_network(
            tenant_id=mocked.APIC_TENANT, expected_res_status=201,
            **{'apic:allow_route_leak': 'True'})['network']
        sub_route_leak = self.create_subnet(
            tenant_id=mocked.APIC_TENANT,
            network_id=net_route_leak['id'], cidr='10.0.0.0/24',
            ip_version=4)['subnet']
        rtr1 = self.create_router(
            api=self.ext_api, tenant_id=mocked.APIC_TENANT)['router']
        self.l3_plugin.add_router_interface(
            context.get_admin_context(), rtr1['id'],
            {'subnet_id': sub_route_leak['id']})
        p1 = self.create_port(
            network_id=net_route_leak['id'], tenant_id=mocked.APIC_TENANT,
            device_owner='compute:', device_id='someid',
            fixed_ips=[{'subnet_id': sub_route_leak['id'],
                        'ip_address': '10.0.0.10'}])['port']
        self._bind_port_to_host(p1['id'], 'h1')
        rtr2 = self.create_router(
            api=self.ext_api, tenant_id=mocked.APIC_TENANT)['router']
        p2 = self.create_port(
            network_id=net_route_leak['id'], tenant_id=mocked.APIC_TENANT,
            device_owner='network:router_interface',
            fixed_ips=[{'subnet_id': sub_route_leak['id'],
                        'ip_address': '10.0.0.2'}])['port']
        self.l3_plugin.add_router_interface(
            context.get_admin_context(), rtr2['id'],
            {'port_id': p2['id']})
        # use_routing_context router
        rtr3 = self.create_router(
            api=self.ext_api, tenant_id=mocked.APIC_TENANT,
            **{'apic:use_routing_context': rtr1['id']})['router']
        p3 = self.create_port(
            network_id=net_route_leak['id'], tenant_id=mocked.APIC_TENANT,
            device_owner='network:router_interface',
            fixed_ips=[{'subnet_id': sub_route_leak['id'],
                        'ip_address': '10.0.0.3'}])['port']
        self.l3_plugin.add_router_interface(
            context.get_admin_context(), rtr3['id'],
            {'port_id': p3['id']})
        details = self._get_gbp_details(p1['id'], 'h1')
        # One entry per distinct routing context: rtr1 and rtr2.
        self.assertEqual(2, len(details['floating_ip']))
        self.assertEqual('10.0.0.10',
                         details['floating_ip'][0]['fixed_ip_address'])
        self.assertEqual('10.0.0.10',
                         details['floating_ip'][0]['floating_ip_address'])
        self.assertEqual(self._tenant(),
                         details['floating_ip'][0]['nat_epg_tenant'])
        self.assertEqual(self._app_profile(),
                         details['floating_ip'][0]['nat_epg_app_profile'])
        leak_epg_name = 'Leak-%s-%s' % (rtr1['id'],
                                        net_route_leak['id'])
        self.assertEqual(leak_epg_name,
                         details['floating_ip'][0]['nat_epg_name'])
        self.assertEqual('10.0.0.10',
                         details['floating_ip'][1]['fixed_ip_address'])
        self.assertEqual('10.0.0.10',
                         details['floating_ip'][1]['floating_ip_address'])
        self.assertEqual(self._tenant(),
                         details['floating_ip'][1]['nat_epg_tenant'])
        self.assertEqual(self._app_profile(),
                         details['floating_ip'][1]['nat_epg_app_profile'])
        leak_epg_name = 'Leak-%s-%s' % (rtr2['id'],
                                        net_route_leak['id'])
        self.assertEqual(leak_epg_name,
                         details['floating_ip'][1]['nat_epg_name'])
def test_notify_router_interface_update(self):
exc = driver.InterTenantRouterInterfaceNotAllowedOnPerTenantContext
net = self.create_network(
tenant_id='onetenant', expected_res_status=201, shared=True,
is_admin_context=True)['network']
sub = self.create_subnet(
network_id=net['id'], cidr='192.168.0.0/24',
ip_version=4, is_admin_context=True)
router = self.create_router(api=self.ext_api,
expected_res_status=201)['router']
self._register_agent('h1')
with self.port(subnet=sub, tenant_id='anothertenant',
device_owner='network:router_interface') as p1:
with self.port(subnet=sub, tenant_id='anothertenant') as p2:
self._bind_port_to_host(p2['port']['id'], 'h1')
self.mgr.add_router_interface = mock.Mock()
if self.driver.per_tenant_context:
self.assertRaises(
exc,
self.l3_plugin.add_router_interface,
| |
# cfbeautifier/parser.py (from basvandervlies/cfengine_beautifier)
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from . import lexer
from .util import ParserError
from . import structure
from . import util
from .version_abstraction import text_class
from .ply import yacc
import os
import re
import sys
# PLY requires the token list to be visible at this module's top level.
tokens = lexer.tokens
# This must be first by line number, and cannot be declared in the grammar variable below, since
# all the functions in "grammar" share the same line number, and their order is unpredictable.
def p_specification(p):
    # NOTE: the string below is the PLY grammar rule, not documentation.
    "specification : blocks_node"
    # The parsed specification is simply its list of blocks.
    p[0] = p[1]
def declare_grammar():
    """Declare the whole CFEngine grammar to PLY.

    The ``grammar`` table below pairs grammar expressions (the docstrings
    PLY reads from ``p_``-prefixed functions) with "convert" functions
    that build the corresponding ``structure`` nodes. The table is turned
    into real module-level ``p_`` functions by declare_grammar_function.
    """
    # return a convert_fn that returns the nth element from the parsed elements
    def nth(index):
        def fn(position, *elements):
            return elements[index]
        fn.__name__ = str("nth(%d)" % index) # str for Python 2
        return fn
    # convert_fn that returns the first parsed element
    first = nth(0)
    # convert_fn that just returns an empty list
    def empty_list(position, *elements):
        return []
    # return a convert_fn that sets the given attributes on the parsed element
    def modified(**kwargs):
        def fn(position, element):
            for key, value in kwargs.items():
                setattr(element, key, value)
            return element
        fn.__name__ = str("_".join(kwargs.keys())) # str for Python 2
        return fn
    parent_gets_comments = modified(priority_of_giving_parent_comments = 1)
    keeps_comments = modified(priority_of_giving_parent_comments = 0)
    def priority_comments(priority, convert_fn):
        """
        return a convert function that calls the given convert_fn and sets
        the priority_of_giving_parent_comments on the returned element.
        Returned function takes one element (and position) as argument.
        """
        def wrapper(position, element):
            element = convert_fn(position, element)
            element.priority_of_giving_parent_comments = priority
            return element
        wrapper.__name__ = str(convert_fn.__name__ + ("_priority_%d" % priority)) # str for Python 2
        return wrapper
    # convert_fn to return None
    def none(*args): None
    def line_broken_list(position, open_brace, items, *rest):
        # Difficult to do this via the grammar since both ArgumentList and List contain litems,
        # but ArgumentList items should not respect empty lines
        for item in items:
            item.respects_preceding_empty_line = True
        return structure.List(position, open_brace, items, *rest)
    # Generate the accumulator and constructor productions for a list-style
    # rule named "{name}s", optionally comma separated and optionally empty.
    def list_of(name, list_class,
                open = "none", close = "none", comma_separated = False, empty = False):
        format_args = { "plural" : name + "s",
                        "name" : name,
                        "open" : open,
                        "close" : close }
        if comma_separated:
            def append(position, head, comma, last):
                head.append(last)
                return head
            format_args["comma"] = "COMMA "
        else:
            def append(position, head, last):
                head.append(last)
                return head
            format_args["comma"] = ""
        def as_list(position, element):
            return [element]
        accumulator = [["{name}s : {name}", as_list],
                       ["{name}s : {name}s {comma}{name}", append]]
        if empty:
            accumulator.append(["{name}s : ", empty_list])
        constructor = [["{plural}_node : {open} {plural} none {close}", list_class]]
        return list(map(lambda statement_and_convert_fn:
                            [statement_and_convert_fn[0].format(**format_args),
                             statement_and_convert_fn[1]],
                        accumulator + constructor))
    # Argument order in structure classes' constructors must be such that they can be used as convert_fn
    grammar = (
        list_of("block", structure.Specification, empty = True) +
        [["""block : bundle
                 | body""", first]] +
        list_of("aitem", structure.ArgumentList,
                open = "open_paren", close = "close_paren", comma_separated = True, empty = True) +
        # The nones must match to what is in list_of function
        [["aitems_node : none empty none none", structure.ArgumentList],
         ["aitem : id", keeps_comments],
         ["bundle : bundle_token id id aitems_node bundle_statements_node", structure.Bundle]] +
        list_of("bundle_statement", structure.PromiseTypeList,
                open = "comments_keeping_open_brace", close = "close_brace", empty = True) +
        [["bundle_statement : promise_type classpromises_node", structure.PromiseType],
         ["promise_type : PROMISE_TYPE", structure.String]] +
        list_of("classpromise", structure.ClassPromiseList, empty = True) +
        [["""classpromise : class
                          | promise_line""", first],
         ["""promise_line : promiser_statement
                          | promisee_statement""", first],
         # maybe_comma is not part of the syntax, but was found in CFEngine 3.6rc1 configs,
         # and is accepted by CFEngine
         ["promiser_statement : string none none maybe_comma constraints_node semicolon",
          structure.Promise],
         ["promisee_statement : string arrow rval maybe_comma constraints_node semicolon",
          structure.Promise]] +
        list_of("constraint", structure.ConstraintList, empty = True) +
        [["constraint : constraint_id assign rval maybe_comma", structure.Constraint],
         ["constraint_id : id", parent_gets_comments],
         ["body : body_token id id aitems_node bodyattribs_node", structure.Body]] +
        list_of("bodyattrib", structure.ClassSelectionList,
                open = "comments_keeping_open_brace", close = "close_brace") +
        [["""bodyattrib : class
                        | selection semicolon""", first],
         ["selection : constraint_id assign rval maybe_comma", structure.Selection],
         ["class : class_expression", structure.Class],
         ["""rval : id
                  | symbol
                  | string
                  | list
                  | usefunction
                  | nakedvar""", parent_gets_comments],
         ["list : open_brace litems maybe_comma close_brace", line_broken_list],
         ["""litem : id
                   | string
                   | symbol
                   | nakedvar
                   | usefunction""", keeps_comments]] +
        list_of("litem", structure.ArgumentList,
                open = "open_paren", close = "close_paren", comma_separated = True, empty = True) +
        [["""maybe_comma : none
                         | comma""", first],
         ["""semicolon : SEMICOLON
             close_paren : CLOSE_PAREN""", priority_comments(2, structure.String)],
         ["""comments_keeping_open_brace : OPEN_BRACE
             close_brace : CLOSE_BRACE""", structure.String],
         [""" comma : COMMA
              string : QSTRING
              nakedvar : NAKEDVAR
              open_brace : OPEN_BRACE
              open_paren : OPEN_PAREN
              class_expression : CLASS
              bundle_token : BUNDLE
              body_token : BODY
              assign : ASSIGN
              symbol : SYMBOL
              arrow : ARROW
              id : IDSYNTAX""", priority_comments(1, structure.String)],
         ["usefunction : functionid litems_node", structure.Function],
         ["""functionid : id
                        | symbol
                        | nakedvar""", first],
         ["none : ", none],
         ["empty :", empty_list]])
    # Declares a function (names p_...) as required by PLY. Expression is the grammar expression
    # (docstring in ply p_ function). The generated function calls convert_fn with Position
    # of the first element and all the matched elements as argument. convert_fn must return
    # the projection (normally, by using the given elements).
    def declare_grammar_function(expression, convert_fn):
        # Add p_ prefix and clean up characters that are invalid in a function
        function_name = "p_%s" % re.sub(r"[:| \n]+", "_", expression)
        def fn(p):
            p_size = len(p)
            if 1 < p_size:
                end_index = p_size - 1
                last = p[end_index]
                # Any other element must end where the last string ended
                # This is a workaround for PLY in some cases extending the covered space
                # until the next encountered element. -> Use last_end_of and last_end_line_number
                # from lexer for other elements.
                if isinstance(last, text_class):
                    # Only encountering a matched string may change the position
                    lexer.last_end_pos = p.lexpos(end_index) + len(last)
                    # The string may contain line breaks
                    lexer.last_end_line_number = p.linespan(end_index)[1] + last.count("\n")
                position = structure.Position(start_line_number = p.lineno(1),
                                              end_line_number = lexer.last_end_line_number,
                                              start_pos = p.lexpos(1),
                                              end_pos = lexer.last_end_pos,
                                              parse_index = lexer.parse_index)
            else:
                position = None
            # The elements will still need to be sorted to the order in which they were encountered,
            # in order to assign comments to the right node
            lexer.parse_index += 1
            p[0] = convert_fn(position, *p[1:])
        fn.__doc__ = expression
        fn.__name__ = str(function_name) # str for Python 2
        setattr(sys.modules[__name__], function_name, fn)
    for line in grammar:
        declare_grammar_function(*line)
declare_grammar()
def p_error(p):
    """PLY error hook: turn a parse failure into a ParserError."""
    if not p:
        # End of file.
        # Unfortunately, cannot pass in the string and figure out the last line number, since yacc
        # does not give the input string.
        raise ParserError("End of file", 0, "", 0)
    raise ParserError(p.value, p.lineno, p.lexer.lexdata, p.lexpos)
# Build the parser tables once at import time; the pickled table cache is
# stored next to this module so later imports skip table generation.
yacc.yacc(debug = False,
          picklefile = os.path.join(os.path.dirname(os.path.realpath(__file__)), "parsetab.pickle"))
######
def specification_from_string(string, options):
def comments(comment_tokens, empty_line_numbers, last_line_number):
def is_at_end_of_line(token, previous_eol_pos):
# max, since if at beginning of doc, previous_eol_pos is -1, and the substring would
# be empty, and not the actual substring from start of doc until the token
return not not re.search(r"[^\t \n]", string[max(previous_eol_pos, 0):token.lexpos])
def position(token):
return structure.Position(start_line_number = token.lineno,
end_line_number = token.lineno,
start_pos = token.lexpos,
end_pos = token.lexpos + len(token.value))
class State(object):
"Contains state of comment parsing"
def __init__(self):
self.comments = []
self.current_comment = []
def add_comment(self, comment):
if self.current_comment:
if not self.current_comment.type:
if (self.current_comment.position.end_line_number + 1 in empty_line_numbers
or self.current_comment.position.end_line_number == last_line_number):
# This comment is not related to a node (if it is found in a List of
# some kind)
self.current_comment.type = "standalone"
else:
# This comment probably describes the next Node
self.current_comment.type = "next-node"
self.comments.insert(0, self.current_comment)
self.current_comment = comment
def commit_comment(self):
self.add_comment(None)
def is_part_of_current_comment(self, line_number):
return(self.current_comment and
self.current_comment.position.start_line_number - 1 == line_number)
state = State()
for token in reversed(comment_tokens):
previous_eol_pos = util.previous_end_of_line_pos(string, token.lexpos)
# The original indentation is used to figure out whether standalone comments belong to
# promise type list or class promise list
original_indentation = token.lexpos - previous_eol_pos - 1
if is_at_end_of_line(token, previous_eol_pos):
state.add_comment(structure.Comment(position(token), token.value,
original_indentation,
type = "end-of-line"))
state.commit_comment() # Don't add lines to end-of-line comment
elif state.is_part_of_current_comment(token.lineno):
state.current_comment.prepend_line(position(token), token.value)
else:
state.add_comment(structure.Comment(position(token), token.value,
original_indentation))
state.commit_comment()
return state.comments
def line_numbers_of_empty_lines(string):
return [index + 1
for index, line in enumerate(string.split("\n"))
if re.match(r"^[ \t\r]*$", line)]
def set_empty_lines(nodes, empty_line_numbers):
nodes = filter(lambda node: node.consumes_preceding_empty_line, nodes) # github #6
def line_number_to_node_map():
node_by_line_number = {}
last_line_number = -1
for node in nodes:
node_line_number = node.start_line_number_with_comment()
if last_line_number != node_line_number:
node_by_line_number[node_line_number] = node
last_line_number = node_line_number
return node_by_line_number
node_by_line_number = line_number_to_node_map()
for line_number in empty_line_numbers:
node = node_by_line_number.get(line_number + 1)
if | |
# main_fr.py
import imp
import time
#======================================================================
def board_print(board, move=None, num=0):
    """Pretty-print the 5x5 game board, top row (index 4) first.

    Args:
        board: list of 5 rows of 5 cell values.
        move: optional move to echo before the board (printed only when
            non-empty). Fixed: the original used a mutable default ([]).
        num: board/turn number shown in the header.
    """
    print("====== The current board(", num, ") is (after move): ======")
    if move:  # move not empty
        print("move = ", move)
    # Rows are printed from 4 down to 0 so the board appears upright.
    for i in range(4, -1, -1):
        print(i, ":", end=" ")
        for j in range(5):
            print(board[i][j], end=" ")
        print()
    print(" ", 0, 1, 2, 3, 4)
    print("")
def board_copy(board):
    """Return a copy of the 5x5 board with each row shallow-copied."""
    copied = []
    for row_index in range(5):
        copied.append(list(board[row_index]))
    return copied
#======================================================================
# Student SHOULD implement this function to change current state to new state properly
# Precompute, for every cell of the 5x5 board, the cells reachable from it.
# neighborDict maps cell index (r*5 + c) to all in-bounds 8-neighbours;
# adjacentDict maps cell index to the cells a piece may actually move to:
# cells where (r + c) is odd allow only orthogonal moves, all other cells
# also allow diagonal moves (and then share the same list object with
# neighborDict, as in the original).
neighborDict = {}
adjacentDict = {}

def _on_board(pos):
    # True when (row, col) lies inside the 5x5 grid.
    return 0 <= pos[0] < 5 and 0 <= pos[1] < 5

for r in range(5):
    for c in range(5):
        index = r * 5 + c
        eight = [(r, c - 1), (r, c + 1), (r - 1, c), (r + 1, c),
                 (r - 1, c - 1), (r + 1, c + 1), (r - 1, c + 1), (r + 1, c - 1)]
        neighborDict[index] = [p for p in eight if _on_board(p)]
        if (r + c) % 2 == 1:
            # Orthogonal-only cell: diagonal moves are not allowed here.
            orthogonal = [(r - 1, c), (r, c - 1), (r, c + 1), (r + 1, c)]
            adjacentDict[index] = [p for p in orthogonal if _on_board(p)]
        else:
            adjacentDict[index] = neighborDict[index]

# Keep the module-level scratch names the original loop left behind ([]).
neighborPosL = []
adjacentPosL = []
def traverse_CHET(startPos, currColor, oppColor, state, q=None):
    """Depth-first capture check starting from an opponent piece.

    Tentatively recolors the opponent-colored group reachable from
    ``startPos`` to ``currColor``. If the walk reaches an empty cell '.'
    (directly or in any sub-branch), the recoloring done along this
    branch is rolled back and False is returned; otherwise the group has
    no way out, the recoloring is kept, and True is returned.

    Args:
        startPos: (row, col) of the piece to start from.
        currColor: current player's color character ('b' or 'r').
        oppColor: opponent's color character.
        state: 5x5 board (list of 5 lists); mutated in place.
        q: internal DFS stack of tentatively recolored positions; callers
            should omit it (a fresh list is created per top-level call).

    Returns:
        True when the group is fully enclosed, else False.
    """
    # BUG FIX: the original declared q=[] as a mutable default argument.
    # Since q is mutated (append/pop) and keeps its entries whenever the
    # function returns True, a later default-argument call would start
    # with stale positions from a previous board and "roll back" cells
    # that were never touched. A None sentinel gives each call its own
    # fresh stack.
    if q is None:
        q = []
    state[startPos[0]][startPos[1]] = currColor
    q.append(startPos)
    for x in adjacentDict[startPos[0] * 5 + startPos[1]]:
        if (state[x[0]][x[1]] == '.') or (state[x[0]][x[1]] == oppColor and (not traverse_CHET(x, currColor, oppColor, state, q))):
            # Escape found: undo every recoloring made at or below this level.
            while q[-1] != startPos:
                state[q[-1][0]][q[-1][1]] = oppColor
                q.pop()
            state[startPos[0]][startPos[1]] = oppColor
            q.pop()
            return False
    return True
def doit(move, state, a):
# move: a list of two tuples
# (row0, col0): current position of selected piece
# (row1, col1): new position of selected piece
# state: a list of 5 list, simulating the game board
# a: a list of anything (if needed) [curr_player, board_num, state, move, [list of traps (if exist)]]
if not move or not state:
return None
#else:
if move[0] == move[1]:
return None
row0 = move[0][0]
col0 = move[0][1]
row1 = move[1][0]
col1 = move[1][1]
if row0 not in range(5) or col0 not in range(5) or row1 not in range(5) or col1 not in range(5):
return None
if state[ row0 ][ col0 ] == '.' or state[ row1 ][ col1 ] in ['b', 'r']:
return None
if state[row0][col0] != str(a[0]):
return None
#else: # check if two points are adjacent
index0 = row0*5 + col0
index1 = row1*5 + col1
if move[1] not in adjacentDict[index0]:
return None
# if (row1 - row0) not in [-1, 0, 1] or (col1 - col0) not in [-1, 0, 1]:
# return None
# # evenL = [0, 2, 4]
# # oddL = [1, 3]
# isDiagonal = True
# if (row0 % 2 == 0 and col0 % 2 != 0) or (row0 % 2 != 0 and col0 % 2 == 0):
# isDiagonal = False
# if (row1 - row0) in [-1, 1] and (col1 - col0) in [-1, 1]: # not allow to move diagonally in these positions
# return None
#else: # the move is valid except when previous move is a trapping move, we should do more check @@
if a[1] == 0: # starting point of the game, the first move
#a.append(state) # previous board game
#a.append(move) # current move now considered as previous move
a.append([]) # list of tuples containing positions of traps created by previous move
#TODO: make some changes to the state
# create new board for the legal move
new_state = board_copy(state)
new_state[row0][col0] = '.'
new_state[row1][col1] = state[row0][col0]
return new_state
#else: # should check the previous move and compare the previous state with the current state
#isTrapping = False
if a[-1]: # trapped turn
#isTrapping = True
print("trap" + str(a[-1]))
if move[1] not in a[-1]: # previous move is a trapping move, so check for the current move correctness
return None
a[-1] = [] # no need of saving these traps anymore
#TODO: make some changes to the state
# if isDiagonal:
# pL = [(row1 - 1, col1 - 1), (row1 - 1, col1), (row1 - 1, col1 + 1), (row1, col1 - 1), (row1, col1 + 1), (row1 + 1, col1 - 1), (row1 + 1, col1), (row1 + 1, col1 + 1)]
# else:
# pL = [(row1 - 1, col1), (row1, col1 - 1), (row1, col1 + 1), (row1 + 1, col1)]
currColor = state[row0][col0]
oppColor = 'r' if currColor == 'b' else 'b'
# oppL = list(filter(lambda x: x[0] in range(5) and x[1] in range(5) and x != move[0] and state[ x[0] ][ x[1] ] not in ['.', currColor], pL)) # list saving positions around the target point which have the opponent's chessmans
#new_state = board_copy(state)
new_state = state
new_state[row1][col1] = currColor
new_state[row0][col0] = '.'
sameL = []
oppL = []
pointL = []
for x in adjacentDict[index1]:
if new_state[ x[0] ][ x[1] ] == oppColor:
oppL.append(x)
elif new_state[ x[0] ][ x[1] ] == currColor:
sameL.append(x)
elif x != move[0]:
pointL.append(x)
#oppL = list(filter(lambda x: state[ x[0] ][ x[1] ] == oppColor, adjacentDict[index1])) # list saving positions around the target point which have the opponent's chessmans
isChanged = False
################################# "Ganh":
# for x in adjacentDict[index1]:
# if state[ x[0] ][ x[1] ] not in ['.', currColor]:
# yR = row1*2 - x[0]
# yC = col1*2 - x[1]
# if yR in range(5) and yC in range(5) and state[ yR ][ yC ] not in ['.', currColor]: # then "ganh"
# new_state[ x[0] ][ x[1] ] = currColor
# new_state[ yR ][ yC ] = currColor
# isChanged = True
changedL = [] # list saving chessman positions of which colors are changed
newOppL = []
for x in oppL:
if new_state[ x[0] ][ x[1] ] == oppColor:
yR = row1*2 - x[0] # find the
yC = col1*2 - x[1] # symmetric point
if ( 0 <= yR < 5 ) and ( 0 <= yC < 5 ) and (new_state[ yR ][ yC ] == oppColor): # then "ganh"
new_state[ x[0] ][ x[1] ] = currColor
new_state[ yR ][ yC ] = currColor
isChanged = True
changedL.append(x)
changedL.append( (yR, yC) )
else:
newOppL.append(x)
# pL = oppL[:]
# i = 0
# j = 1
# isFound | |
<filename>epic_stuff_and_things.py
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
# --- Game setup -------------------------------------------------------------
import sys  # NOTE(review): imported but never used in the visible code
print("")
print("")
print("")
print( "Epic Stuff and Things" )
print( "By: <NAME>")
name = input("Enter your name: ")   # player name, echoed in room descriptions
color = input("Enter a color: ")    # house color, echoed in room descriptions
print("")
print("Welcome to epic stuff and things")
# Global game state shared by all room functions below.
inv = []             # inventory: list of item-name strings
troll = True         # True while the front-room troll is alive
goblin = True        # True while the forest goblin is alive
water_bottle = True  # True while the bottle is still on the kitchen table
shatterd = False     # True once the bottle has been shattered
couch_block = True   # True while the couch blocks the living-room door
nailed = True        # True while the living-room door is nailed shut
door_locked = True   # True while the living-room door is locked
#def iremove_inv(i):
# if check_inv(i):
# inv.remove(i)
# else:
# print("You don't seem to have", i , "in your inventory")
def remove_inv(i):
    """Drop item *i* from the global inventory; complain when it is absent."""
    if i in inv:
        inv.remove(i)
    else:
        print("You don't seem to have", i, "in your inventory")
def add_to_inv(i):
    """Add item *i* to the global inventory unless it is already held.

    Idiom fix: the original walked the list manually; a membership test
    does the same in one expression.
    """
    if i in inv:
        print("You already have that in your inventory")
        return
    inv.append(i)
def show_inv():
    """Print the raw inventory list."""
    print(inv)
def check_inv(i):
    """Return True when item *i* is in the global inventory.

    Idiom fix: `if x in y: return True / return False` collapsed to a
    single boolean expression.
    """
    return i in inv
def attic_room():
    """Attic of the house: a locked cupboard (bronze key) holds the hammer.

    Exits: east -> living room, north -> kitchen.
    Bug fix: the original had a second, unreachable `elif move == 'inv'`
    branch (the first one always matched first); it has been removed.
    """
    cupboard = False  # becomes True once the cupboard has been opened
    print("Hello " + name + " you are up in the attic of the " + color + " house.")
    print("There is a large cupboard south of you. There are stairways leading to")
    print("the north and east.")
    while True:
        move = input("What would you like to do? ")
        if move == 'east':
            living_room()
        elif move == 'north':
            kitchen()
        elif move == 'inv':
            show_inv()
        elif move == 'open cupboard':
            if check_inv("bronze key"):
                print("In the cubard there is a hammer")
                cupboard = True
            else:
                print("looks like the cupboard is locked")
        elif move == 'take hammer' and cupboard:
            print("got hammer")
            add_to_inv("hammer")
        else:
            print("I'm not sure what you mean. ")
def front_room():
    """Front room of the house; a troll guards the exit north to the field."""
    global troll
    if troll:
        print(name + " is now in the front room of the " + color + " house. There ")
        print("is a troll blocking your way outside.")
    else:
        print("You are now in the front room of the " + color + " house. There is")
        print("a dead troll on the ground. There are doors to the north and west.")
    while True:
        move = input("what would you like to do? ")
        if move == 'west':
            kitchen()
        elif move == 'north':
            # Blocked while the troll lives; otherwise go outside.
            if troll:
                print("The troll is blocking your way.")
            else:
                field()
        elif move == 'kill troll':
            if not check_inv("sword"):
                print("You don't have the object.")
            else:
                troll = False
                print("Troll dies.")
        elif move == 'inv':
            show_inv()
        else:
            print("I'm not sure what you mean.")
def field():
    """Field north of the house; exits south (front room) and east (forest)."""
    print("""You are in a field north of the house, there is a path to the east
and a door to the south.""")
    while True:
        choice = input("What would you like to do? ")
        if choice == 'inv':
            show_inv()
        elif choice == 'south':
            front_room()
        elif choice == 'east':
            forest()
        else:
            print("I'm not sure what you mean.")
def forest():
    """Forest east of the field; a goblin with an ax blocks the way back.

    Bug fixes versus the original:
    * going 'west' while the goblin lived only printed the blocking
      message when the player ALSO carried a sword (a stray
      `check_inv("sword")` term); without a sword the command fell
      through to "I'm not sure what you mean.". The sword check is gone.
    * 'kill goblin' without a sword gave no feedback at all; it now
      prints the same message the front room uses.
    """
    global goblin
    if goblin:
        print("You are in a forest with a gobin blocking your way, the goblin has a ax.")
    else:
        print("You are in a forest there is a dead goblin on the ground.")
    while True:
        move = input("What would you like to do? ")
        if move == 'west' and goblin:
            print("The goblin blockes your way.")
        elif move == 'west' and not goblin:
            field()
        elif move == 'kill goblin':
            if check_inv("sword"):
                goblin = False
                print("The goblin is dead.")
            else:
                print("You don't have the object.")
        elif move == 'take ax' and goblin:
            print("The goblin has the ax and will not give it to you.")
        elif move == 'take ax' and not goblin:
            print("got ax")
            add_to_inv("ax")
        elif move == 'inv':
            show_inv()
        else:
            print("I'm not sure what you mean. ")
def living_room():
    """Living room: the couch hides the sword and bronze key; the east door
    is blocked, nailed and locked until all three obstacles are cleared.

    Exits: west -> attic, east door -> grove.

    Bug fixes versus the original:
    * 'unlock door' with the silver key while the couch still blocked the
      door fell through to the generic "I'm not sure" message; there is
      now a dedicated branch explaining why it fails.
    * garbled message "nailied shut dut not locked" corrected.
    """
    timeslook = 0  # how many times the couch has been searched this visit
    couch = 0      # parity counter: odd = couch moved away, even = moved back
    global couch_block
    global nailed
    global door_locked
    if couch_block and nailed:
        print("""You are now in the living room there is a large couch blocking
the nailed shut door to the east""")
    elif couch_block and not nailed:
        print("""You are now in the living room there is a large couch blocking
a closed shut door to the east.""")
    elif not couch_block and nailed:
        print("""You are now in the living room there is a couch by the nailed
shut door to the east.""")
    elif not couch_block and not nailed:
        print("""You are now in the living room there is a huge couch by the
closed door to the east.""")
    while True:
        move = input("What would you like to do? ")
        if move == 'west':
            attic_room()
        elif move == 'open door' and couch_block and nailed and door_locked:
            print("The door is blocked and nailed shut.")
        elif move == 'open door' and couch_block and not nailed and door_locked:
            print("The door is blocked but not nailed it also locked.")
        elif move == 'open door' and not nailed and not couch_block and not door_locked:
            grove()
        elif move == 'open door' and not nailed and not couch_block and door_locked:
            print("The door is still locked.")
        elif move == 'open door' and not couch_block and nailed and not door_locked:
            print("The door is nailed shut but not locked.")
        elif move == 'unlock door' and not check_inv("silver key"):
            print("You don't have the silver key.")
        elif move == 'unlock door' and check_inv("silver key") and not couch_block:
            print("The key hole jammed but you turn hard and the door is unlocked")
            door_locked = False
        elif move == 'unlock door' and check_inv("silver key") and couch_block:
            # Previously fell through to the generic message.
            print("The couch is blocking the door so you can't reach the lock.")
        elif move == 'take off nails' and check_inv("hammer"):
            print("With a great effort you take off the nails")
            nailed = False
        elif move == 'take off nails' and not check_inv("hammer"):
            print("""You tried to take of the nails with your own finger but
hurts a lot. You decide to give up on taking of the nails becuase you got cut.
You need a hammer to get them of you decide.""")
        elif move == 'look in couch':
            if timeslook == 0:
                print("Found sword")
                add_to_inv("sword")
                timeslook += 1
            elif timeslook == 1:
                print("Found bronze key")
                add_to_inv("bronze key")
                timeslook += 1
            else:
                print("There is nothing left in the couch")
        elif move == 'move couch':
            couch += 1
            if couch % 2 == 1:
                print("""with a great effort you move the couch away from the door""")
                couch_block = False
            elif couch % 2 == 0:
                print("with a great effort you move the couch back.")
                couch_block = True
        elif move == 'inv':
            show_inv()
        else:
            print("I'm not sure what you mean. ")
def kitchen():
    """Kitchen: the black water bottle on the table hides the silver key.

    Exits: east -> front room, south -> attic.

    Bug fixes versus the original:
    * the bottle could be "taken" repeatedly (and after it was gone);
      taking now requires it to still be on the table.
    * 'throw water bottle' worked even when the player never picked the
      bottle up; it now requires the bottle to be in the inventory.
    * leftover debug command 'test' removed.
    * typo "I'n not sure" corrected to "I'm not sure".
    """
    global water_bottle
    global shatterd
    if water_bottle and not shatterd:
        print("""You are in the kitchen and there is a black water bottle on the table
and stairs to the south of you and a door to the east""")
    elif not water_bottle and not shatterd:
        print("""You are know in the kitchen and there is a empty table next to you.
There is a door to the east of you and stairs leading to the south""")
    elif not water_bottle and shatterd:
        print("""You are know in the kitchen and a shatterd water bottle is all over the
floor and a table is just sitting there. There is a door to the east and stairs to
the south""")
    while True:
        move = input("What would you like to do? ")
        if move == 'east':
            front_room()
        elif move == 'south':
            attic_room()
        elif move == 'take black water bottle' and water_bottle:
            print("Got black water bottle and you heard a jingle.")
            add_to_inv("black water bottle")
            water_bottle = False
        elif move == 'throw water bottle' and not shatterd and check_inv("black water bottle"):
            print("The black water bottle shatters and reveals a silver key")
            shatterd = True
            add_to_inv("silver key")
            remove_inv("black water bottle")
        elif move == 'throw water bottle':
            print("You don't have a water bottle.")
        elif move == 'inv':
            show_inv()
        else:
            print("I'm not sure what you mean.")
def grove():
if not check_inv("ax"):
print("""You are now outside in a grove of trees. There is a door to the west""")
elif check_inv("ax"):
print ("""You enter a grove then all of the sudden a wall pops up and a
spider crawls over the wall and the spider is as big as you and the fangs as long
as your arm then you see the sand timer on it's belly and it is red. IT IS A
BLACK WIDOW!""")
while True:
move = input("What would you like to do? ")
| |
<gh_stars>0
#!/usr/bin/env python
# Copyright (C) 2015 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Manage Jenkins plugin module registry.
import copy
import logging
import operator
import pkg_resources
import re
import types
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.formatter import deep_format
__all__ = [
"ModuleRegistry"
]
logger = logging.getLogger(__name__)
class MacroRegistry(object):
    """Registry of user-defined JJB macros, keyed by component type.

    A macro is a named, reusable group of components (builders, wrappers,
    scm entries, ...) declared in YAML. The registry records every macro
    and expands macro references inside jobs / job-templates into their
    underlying component lists, interpolating template variables.
    """

    def __init__(self):
        # BUG FIX: these four mappings used to be *class-level* mutable
        # attributes, so every MacroRegistry instance shared (and
        # re-populated) the same dictionaries. They are now per-instance.
        self._component_to_component_list_mapping = {}
        self._component_list_to_component_mapping = {}
        self._macros_by_component_type = {}
        self._macros_by_component_list_type = {}

        # One entry point per component type (e.g. "builder" <-> "builders").
        for entrypoint in pkg_resources.iter_entry_points(
                group='jenkins_jobs.macros'):
            Mod = entrypoint.load()
            self._component_list_to_component_mapping[
                Mod.component_list_type] = Mod.component_type
            self._component_to_component_list_mapping[
                Mod.component_type] = Mod.component_list_type
            self._macros_by_component_type[
                Mod.component_type] = {}
            self._macros_by_component_list_type[
                Mod.component_list_type] = {}

        # Macro names already warned about for masking an inbuilt module.
        self._mask_warned = {}

    @property
    def _nonempty_component_list_types(self):
        """Component list types that have at least one macro registered."""
        return [clt for clt in self._macros_by_component_list_type
                if len(self._macros_by_component_list_type[clt]) != 0]

    @property
    def component_types(self):
        """All known component type names."""
        return self._macros_by_component_type.keys()

    def _is_macro(self, component_name, component_list_type):
        """Return True when component_name names a registered macro for the
        given component list type."""
        return (component_name in
                self._macros_by_component_list_type[component_list_type])

    def register(self, component_type, macro):
        """Record a macro definition under both its component type and the
        corresponding component list type.

        :arg str component_type: type of component the macro provides
            (e.g. `builder`).
        :arg dict macro: the macro definition; must carry a "name" key.
        """
        macro_name = macro["name"]
        clt = self._component_to_component_list_mapping[component_type]
        self._macros_by_component_type[component_type][macro_name] = macro
        self._macros_by_component_list_type[clt][macro_name] = macro

    def expand_macros(self, jobish, template_data=None):
        """Expand, in place on *jobish*, every macro reference it contains.

        Doc fix: the original docstring claimed a copy was made and
        returned; in fact jobish is modified in place and None is returned.

        :arg dict jobish: a job-like JJB data structure ("job",
            "job-template" or "default" DSL item) whose component lists may
            reference macros.
        :arg dict template_data: if jobish is a job-template, the template
            data used to interpolate variables into macro names and bodies.
        """
        for component_list_type in self._nonempty_component_list_types:
            self._expand_macros_for_component_list_type(
                jobish, component_list_type, template_data)

    def _expand_macros_for_component_list_type(self,
                                               jobish,
                                               component_list_type,
                                               template_data=None):
        """In-place expansion of macros of one component list type on jobish.

        :arg dict jobish: job-like JJB data structure (modified in place).
        :arg str component_list_type: which component list ("builders",
            "wrappers", ...) to expand macros in.
        :arg dict template_data: template variables for interpolation.
        """
        if (jobish.get("project-type", None) == "pipeline"
                and component_list_type == "scm"):
            # Pipeline projects have an atypical scm type, eg:
            #
            #   - job:
            #       name: whatever
            #       project-type: pipeline
            #       pipeline-scm:
            #         script-path: nonstandard-scriptpath.groovy
            #         scm:
            #           - macro_name
            #
            # as opposed to the more typical:
            #
            #   - job:
            #       name: whatever2
            #       scm:
            #         - macro_name
            #
            # So we treat that case specially here.
            component_list = jobish.get("pipeline-scm", {}).get("scm", [])
        else:
            component_list = jobish.get(component_list_type, [])

        component_substitutions = []
        for component in component_list:
            macro_component_list = self._maybe_expand_macro(
                component, component_list_type, template_data)
            if macro_component_list is not None:
                # Macros may contain other macros: recurse into the newly
                # expanded component list (wrapped to look like a job) to
                # expand any nested macro references.
                self._expand_macros_for_component_list_type(
                    {component_list_type: macro_component_list},
                    component_list_type,
                    template_data)
                component_substitutions.append(
                    (component, macro_component_list))

        # Splice each macro's components into the list where the macro
        # reference used to sit, preserving order.
        for component, macro_component_list in component_substitutions:
            component_index = component_list.index(component)
            component_list.remove(component)
            for offset, macro_component in enumerate(macro_component_list):
                component_list.insert(component_index + offset,
                                      macro_component)

    def _maybe_expand_macro(self,
                            component,
                            component_list_type,
                            template_data=None):
        """Return the macro's component list (with variables interpolated)
        when *component* references a macro, else None.

        :arg component: either a plain string name (eg "run-tests") or a
            singleton dict of {name: dict(args)}.
        :arg str component_list_type: which component list type to look in.
        :arg dict template_data: template variables; also used to
            interpolate variables inside the macro name itself.
        """
        component_copy = copy.deepcopy(component)
        if isinstance(component, dict):
            # The component is a singleton dictionary of name: dict(args)
            component_name, component_data = next(iter(component_copy.items()))
        else:
            # The component is a simple string name, eg "run-tests".
            component_name, component_data = component_copy, None

        if template_data:
            # Address the case where a macro name contains a variable to be
            # interpolated by template variables.
            component_name = deep_format(component_name, template_data, True)

        # Check that the component under consideration actually is a macro.
        if not self._is_macro(component_name, component_list_type):
            return None

        # Warn (once per name -- BUG FIX: _mask_warned was written but
        # never consulted, so the warning repeated) if the macro shadows an
        # actual module type name for this component list type.
        if (component_name not in self._mask_warned
                and ModuleRegistry.is_module_name(component_name,
                                                  component_list_type)):
            self._mask_warned[component_name] = True
            logger.warning(
                "You have a macro ('%s') defined for '%s' "
                "component list type that is masking an inbuilt "
                "definition" % (component_name, component_list_type))

        macro_component_list = self._get_macro_components(component_name,
                                                          component_list_type)

        # If the macro instance carries arguments, interpolate them into
        # the macro body, preferring them over template_data on conflict.
        if component_data:
            if template_data:
                template_data = copy.deepcopy(template_data)
                template_data.update(component_data)
                macro_component_list = deep_format(
                    macro_component_list, template_data, False)
            else:
                macro_component_list = deep_format(
                    macro_component_list, component_data, False)

        return macro_component_list

    def _get_macro_components(self, macro_name, component_list_type):
        """Return a deep copy of the component list a macro expands into.

        For example:

            - wrapper:
                name: timeout-wrapper
                wrappers:
                  - timeout:
                      fail: true
                      elastic-percentage: 150
                      elastic-default-timeout: 90
                      type: elastic

        provides a single "wrapper" type (corresponding to the "wrappers"
        list type) component named "timeout"; macro_name here would be
        "timeout-wrapper".
        """
        macro_component_list = self._macros_by_component_list_type[
            component_list_type][macro_name][component_list_type]
        return copy.deepcopy(macro_component_list)
class ModuleRegistry(object):
_entry_points_cache = {}
def __init__(self, jjb_config, plugins_list=None):
self.modules = []
self.modules_by_component_type = {}
self.handlers = {}
self.jjb_config = jjb_config
self.masked_warned = {}
if plugins_list is None:
self.plugins_dict = {}
else:
self.plugins_dict = self._get_plugins_info_dict(plugins_list)
for entrypoint in pkg_resources.iter_entry_points(
group='jenkins_jobs.modules'):
Mod = entrypoint.load()
mod = Mod(self)
self.modules.append(mod)
self.modules.sort(key=operator.attrgetter('sequence'))
if mod.component_type is not None:
self.modules_by_component_type[mod.component_type] = entrypoint
@staticmethod
def _get_plugins_info_dict(plugins_list):
def mutate_plugin_info(plugin_info):
"""
We perform mutations on a single member of plugin_info here, then
return a dictionary with the longName and shortName of the plugin
mapped to its plugin info dictionary.
"""
version = plugin_info.get('version', '0')
plugin_info['version'] = re.sub(r'(.*)-(?:SNAPSHOT|BETA).*',
r'\g<1>.preview', version)
aliases = []
for key in ['longName', 'shortName']:
value = plugin_info.get(key, None)
if value is not None:
aliases.append(value)
plugin_info_dict = {}
for name in aliases:
plugin_info_dict[name] = plugin_info
return plugin_info_dict
list_of_dicts = [mutate_plugin_info(v) for v in plugins_list]
plugins_info_dict = {}
for d in list_of_dicts:
plugins_info_dict.update(d)
return plugins_info_dict
def get_plugin_info(self, plugin_name):
""" This method is intended to provide information about plugins within
a given module's implementation of Base.gen_xml. The return value is a
dictionary with data obtained directly from a running Jenkins instance.
This allows module authors to differentiate generated XML output based
on information such as specific plugin versions.
:arg string plugin_name: Either the shortName or longName of a plugin
as see in a query that looks like:
``http://<jenkins-hostname>/pluginManager/api/json?pretty&depth=2``
During a 'test' run, it is possible to override JJB's query to a live
Jenkins instance by passing it a path to a file containing a YAML list
of dictionaries that mimics the plugin properties you want your test
output to reflect::
jenkins-jobs test -p /path/to/plugins-info.yaml
Below is example YAML that might be included in
/path/to/plugins-info.yaml.
.. literalinclude:: /../../tests/cmd/fixtures/plugins-info.yaml
"""
return self.plugins_dict.get(plugin_name, {})
def registerHandler(self, category, name, method):
cat_dict = self.handlers.get(category, {})
if not cat_dict:
self.handlers[category] = cat_dict
cat_dict[name] = method
def getHandler(self, category, name):
return self.handlers[category][name]
@property
def parser_data(self):
return self.__parser_data
def set_parser_data(self, parser_data):
self.__parser_data = parser_data
def dispatch(self, component_type, xml_parent, component):
"""This is a method that you can call from your implementation of
Base.gen_xml or component. It allows modules to define a type
of component, and benefit from extensibility via Python
entry points and Jenkins Job Builder :ref:`Macros <macro>`.
:arg string component_type: the name of the component
(e.g., `builder`)
:arg YAMLParser parser: the global YAML Parser
:arg | |
<gh_stars>10-100
#!/usr/bin/python
import numpy as np
import scipy.special as spc
'''
We have orbitals
   Phi_1 = c_a * chi_a + c_b * chi_b
   Phi_2 = c_c * chi_c + c_d * chi_d
where the chi_i are Gaussian-type basis functions
and the c_i are expansion coefficients.
The electron density of molecular orbital 1, Rho_1 = <phi_1|phi_1>,
can be expressed using auxiliary Gaussian basis functions
   rho_ab = chi_a * chi_b
   Rho_1  = sum_ab c_a*c_b*S_ab * rho_ab
          = sum_ab q_ab * rho_ab
where q_ab = c_a*c_b*S_ab is the charge of the auxiliary electron blob,
with S_ab being the overlap integral between the basis functions chi_a and chi_b.
We can use the collective indices i = ab and j = cd:
   qi = Sab*ca*cb
   qj = Scd*cc*cd
The repulsion between blobs qi, qj can then be expressed as
   E_ij = qi*qj * <rho_i | 1/r_12 | rho_j>
'''
# Physical constants in SI units, then the conversion factors used to work
# in an eV / Angstrom unit system throughout this module.
const_hbar_SI = 1.054571817e-34; #< [J.s] #6.582119569e-16 # [eV/s]
const_Me_SI = 9.10938356e-31; #< [kg]
const_e_SI = 1.602176620898e-19; #< [Coulomb]
const_eps0_SI = 8.854187812813e-12; #< [F.m = Coulomb/(Volt*m)]
const_eV_SI = 1.602176620898e-19; #< [J]
const_Angstroem_SI = 1.0e-10;
# Derived combinations: kinetic prefactor, Coulomb prefactor, Rydberg energy.
const_K_SI = const_hbar_SI*const_hbar_SI/const_Me_SI;
const_El_SI = const_e_SI*const_e_SI/(4.*np.pi*const_eps0_SI);
const_Ry_SI = 0.5 * const_El_SI*const_El_SI/const_K_SI;
const_Ry_eV = 13.6056925944;
const_El_eVA = const_El_SI/( const_e_SI*const_Angstroem_SI );
const_K_eVA = (const_El_eVA*const_El_eVA)/(2*const_Ry_eV);
const_Ke_eVA = const_K_eVA*1.5;
# Handy numeric constants (mirroring the C math.h names).
M_SQRT2 = 1.41421356237
M_SQRT1_2 = 1/M_SQRT2
M_2_SQRTPI = 1.12837916709551257390
def Coulomb(r, s):
    """Coulomb interaction between two Gaussian charge blobs.

    r : center-to-center distance
    s : combined width of the blobs (charge-density convention, hence the
        M_SQRT1_2 scaling of r/s below)

    Returns (E, fr, fs): the energy and its derivative factors with
    respect to r and s, matching the reference C implementation the
    original source quoted.
    """
    norm_2pi = 2 * np.sqrt(2 / np.pi)  # = M_2_SQRTPI * M_SQRT2
    inv_r = 1. / r
    inv_s = 1. / s
    ratio = r * inv_s
    u = M_SQRT1_2 * ratio              # charge-density blob scaling of r/s
    amp = inv_r * const_El_eVA
    erf_term = spc.erf(u)
    gauss_term = np.exp(-u * u) * norm_2pi
    radial_part = -amp * inv_r
    width_part = gauss_term * inv_s * 0.5
    cross = amp * width_part
    fr = (radial_part * erf_term + cross) * inv_r
    fs = cross * ratio * inv_s
    return amp * erf_term, fr, fs
def Coulomb_new(r, s):
    """Coulomb interaction between Gaussian charge blobs using the fast
    polynomial erf approximation (erfx_approx_d / gauss_p8) instead of the
    exact scipy erf.

    Returns (E, fr, fs): energy and derivative factors w.r.t. r and s.
    Mirrors the reference C implementation the original source quoted
    (charge-density convention: the width enters as 1/(s*sqrt(2))).
    """
    amp = const_El_eVA
    inv_s = 1. / (s * M_SQRT2)
    # erf(k*r)/r approximation together with its radial derivative.
    energy, d_radial = erfx_approx_d(r, inv_s)
    scaled_r = r * inv_s
    d_width = gauss_p8(scaled_r) * inv_s * inv_s * inv_s * (M_2_SQRTPI * 2 * amp)
    energy = energy * amp
    # NOTE: erfx returns fr/r-like quantity; rescale (tiny epsilon avoids 0/0).
    d_radial = d_radial * (amp * (1 / (r + 1e-16)))
    return energy, d_radial, d_width
def product3D_s_deriv( si,pi, sj,pj ):
    '''Product of two Gaussian blobs (width si centered at pi, width sj at pj).

    Returns:
        S  : overlap integral of the two Gaussians
        s  : width of the product Gaussian
        p  : center of the product Gaussian
        dS_dr*dp : derivative of S w.r.t. the separation vector dp = pi - pj
        (dSsi, dXsi, dXxi, dS_dsi) : derivatives w.r.t. si and pi
        (dSsj, dXsj, dXxj, dS_dsj) : derivatives w.r.t. sj and pj

    NOTE(review): the original docstring listed dCsi/dCsj/dCr, which are NOT
    returned (they only appear in the commented-out "Overlaps OLD" section).
    Formulas are transcribed from the C macros quoted inline below.
    '''
    # --- auxiliary width combinations (C macro _Gauss_sij_aux) ---
    '''
    #define _Gauss_sij_aux( si, sj ) \
    double si2 = si*si; \
    double sj2 = sj*sj; \
    double s2 = si2 + sj2; \
    double is2 = 1/s2; \
    double is4 = is2*is2; \
    '''
    si2 = si*si
    sj2 = sj*sj
    s2 = si2 + sj2
    is2 = 1/s2
    is4 = is2*is2
    # --- product Gaussian width and center (C macro _Gauss_product) ---
    ''''
    #define _Gauss_product(pi,pj,si,sj) \
    double sqrt_is2 = sqrt(is2); \
    double size_ij = si*sj*sqrt_is2; \
    Vec3d pos_ij = pj*(si2*is2) + pi*(sj2*is2); \
    '''
    sqrt_is2 = np.sqrt(is2)
    s = si*sj*sqrt_is2 # size
    p = pj*(si2*is2) + pi*(sj2*is2) # position
    dp = pi-pj
    r2 = dp*dp # r2 = dp.norm2() in 1D
    # --- derivatives of width/center w.r.t. si, sj, pi, pj ---
    '''
    #define _Gauss_product_derivs(dSsi,dSsj,dXsi,dXsj,dXxi,dXxj){ \
    double inv3_2 = is2 * sqrt_is2; \
    dSsi = sj*sj2*inv3_2; \
    dSsj = si*si2*inv3_2; \
    dXsi = dp*(-2*si*sj2*is4); \
    dXsj = dp*( 2*sj*si2*is4); \
    dXxi = sj2*is2; \
    dXxj = si2*is2; \
    '''
    inv3_2 = is2*sqrt_is2
    dSsi = sj*sj2*inv3_2
    dSsj = si*si2*inv3_2
    dXsi = dp*(-2*si*sj2*is4)
    dXsj = dp*( 2*sj*si2*is4)
    dXxi = sj2*is2
    dXxj = si2*is2
    # --- overlap integral S and its derivatives (C macro _Gauss_overlap) ---
    '''
    #define _Gauss_overlap( r2, si, sj ) \
    double sisj = si*sj; \
    double inv_sisj= 1/sisj; \
    double g = exp( -r2/(2*s2) ); \
    double S = (2*M_SQRT2) * g * sisj*sisj*is2 * sqrt( inv_sisj*is2 ) ; \
    double dS_dr = -S * is2; \
    double inv_si = sj*inv_sisj; \
    double inv_sj = si*inv_sisj; \
    double S_s4 = S * is4; \
    double dS_dsi = S_s4 * ( si2*r2 + 3*sj2*s2 ) * inv_si; \
    double dS_dsj = S_s4 * ( sj2*r2 + 3*si2*s2 ) * inv_sj; \
    dS_dsi -= 1.5*S * inv_si; \
    dS_dsj -= 1.5*S * inv_sj; \
    '''
    sisj = si*sj
    inv_sisj= 1/sisj
    g = np.exp( -r2/(2*s2) )
    S = (2*M_SQRT2) * g * sisj*sisj*is2 * np.sqrt( inv_sisj*is2 )
    dS_dr = -S * is2
    inv_si = sj*inv_sisj
    inv_sj = si*inv_sisj
    S_s4 = S * is4
    dS_dsi = S_s4 * ( si2*r2 + 3*sj2*s2 ) * inv_si
    dS_dsj = S_s4 * ( sj2*r2 + 3*si2*s2 ) * inv_sj
    dS_dsi -= 1.5*S * inv_si
    dS_dsj -= 1.5*S * inv_sj
    # Old overlap formulation kept for reference (not executed).
    '''
    # ==== Overlaps OLD
    a2 = 2.*(si*sj)*is2
    a = np.sqrt(a2)
    e1 = a2*a
    e2 = np.exp( -r2*is2 )
    f1 = 3.*a * (si2-sj2)*is4
    f2 = 2.*e2 * r2*is4
    dCsi = e1*f2*si - e2*f1*sj
    dCsj = e1*f2*sj + e2*f1*si
    C = e1*e2 # Overlap
    dCr = C*(-2.*is2) # derivative is correct, tested !
    '''
    return S,s,p, dS_dr*dp, (dSsi,dXsi,dXxi,dS_dsi), (dSsj,dXsj,dXxj,dS_dsj)
def checkNumDeriv(x, func, dfunc, name):
    """Plot an analytic derivative against its numerical estimate.

    Evaluates dfunc(x) and func(x), differentiates func numerically via
    numDeriv, overlays both with plotVsNum, and also plots the function
    itself (dash-dot) labelled "<name>_F".
    """
    analytic = dfunc(x)
    values = func(x)
    numeric, _xnum = numDeriv(x, values)
    plotVsNum(x, analytic, numeric, name)
    plt.plot(x, values, '-.', label=name + "_F")
def erfx_approx(x, s):
    """Elementwise polynomial approximation of erf(|x|/s) / |x|.

    Assumes *x* is a numpy array (fancy-index assignment is used).
    For |x/s| >= 4.5 the asymptotic tail 1/|x| is returned; below that, a
    minimax even/odd polynomial raised to the 8th power reproduces the
    reference C implementation. The input array is not modified.
    """
    ax = np.abs(x)
    result = 1. / ax                   # asymptotic tail: erf(big)/x ~ 1/x
    inv_s = 1 / s
    ax = ax * inv_s
    sq = ax * ax
    # Minimax polynomial, split into even and odd parts (coefficients are
    # byte-identical to the reference implementation).
    even = 0.9850156202961753 + sq*(-0.02756061032579559 + sq*(-0.00188409579491924 + sq*(-0.003098629936170076 + sq*(-0.001348858853909826 + sq*(-3.98946569988845e-05)))))
    odd = -0.13893350387140332 + sq*(-0.007664292475021448 + sq*(0.003046826535877866 + sq*(0.002879338499080343 + sq*(0.0003260490382458129 + sq*(1.97093650414204e-06)))))
    base = even + ax * odd
    base = base * base
    base = base * base
    base = base * base                 # raise to the 8th power
    near = np.abs(ax) < 4.5
    result[near] = (inv_s / (base + ax))[near]
    return result
'''
inline double erfx_e6( double x_, double k, double& dy ){
// approximation of erf(k*x)/x and its derivative with maximum error ~ 1e-6
double x =x_*k;
if( x>4.5 ){ double y=1/x_; dy=-y*y; return y; }
double xx = x*x;
double even = 0.9850156202961753 +xx*(-0.02756061032579559 +xx*(-0.00188409579491924 +xx*(-0.003098629936170076 +xx*(-0.001348858853909826 +xx*(-3.98946569988845e-05 | |
<gh_stars>10-100
# -*- coding: utf-8 -*-
'''
stacking.py raet protocol stacking classes
'''
# pylint: skip-file
# pylint: disable=W0611
# Import python libs
import socket
import binascii
import struct
try:
import simplejson as json
except ImportError:
import json
# Import ioflo libs
from ioflo.aid.odicting import odict
from ioflo.aid.osetting import oset
from ioflo.aid.timing import StoreTimer
from ioflo.aid.aiding import packByte, unpackByte
# Import raet libs
from ..abiding import * # import globals
from .. import raeting
from ..raeting import Acceptance, PcktKind, TrnsKind, CoatKind, FootKind
from .. import nacling
from . import packeting
from . import estating
from ioflo.base.consoling import getConsole
console = getConsole()
class Transaction(object):
    '''
    RAET protocol transaction class

    Base class for one side of a RAET transaction. Tracks the owning
    stack, the remote estate, the transaction kind, session/transaction
    ids, the retry/timeout timer and the last packets sent and received.
    '''
    Timeout = 5.0  # default timeout in seconds; 0.0 disables the timeout

    def __init__(self, stack=None, remote=None, kind=None, timeout=None,
                 rmt=False, bcst=False, sid=None, tid=None,
                 txData=None, txPacket=None, rxPacket=None):
        '''
        Setup Transaction instance

        timeout of 0.0 means no timeout go forever

        stack:   owning stack. NOTE(review): assumed non-None despite the
                 default -- the StoreTimer below dereferences stack.store;
                 confirm callers always pass one.
        remote:  remote estate this transaction communicates with
        kind:    transaction kind code; falls back to
                 raeting.PACKET_DEFAULTS['tk'] when falsy
        rmt:     True when the remote side initiated the transaction
        bcst:    broadcast (bf) flag
        sid/tid: session id and transaction id
        txData:  data used to prepare the next tx packet
        txPacket/rxPacket: last transmitted / received packets
        '''
        self.stack = stack
        self.remote = remote
        self.kind = kind or raeting.PACKET_DEFAULTS['tk']
        if timeout is None:
            timeout = self.Timeout
        self.timeout = timeout
        self.timer = StoreTimer(self.stack.store, duration=self.timeout)
        self.rmt = rmt  # remote initiator
        self.bcst = bcst  # bf flag
        self.sid = sid
        self.tid = tid
        self.txData = txData or odict()  # data used to prepare last txPacket
        self.txPacket = txPacket  # last tx packet needed for retries
        self.rxPacket = rxPacket  # last rx packet needed for index

    @property
    def index(self):
        '''
        Property is transaction tuple (rf, le, re, si, ti, bf,)
        Not to be used in join (Joiner and Joinent) since bootstrapping
        Use the txPacket (Joiner) or rxPacket (Joinent) .data instead
        '''
        le = self.remote.nuid
        re = self.remote.fuid
        return ((self.rmt, le, re, self.sid, self.tid, self.bcst,))

    def process(self):
        '''
        Process time based handling of transaction like timeout or retries
        Base implementation is a no-op; subclasses override.
        '''
        pass

    def receive(self, packet):
        '''
        Process received packet Subclasses should super call this
        (records the packet so .index stays current)
        '''
        self.rxPacket = packet

    def transmit(self, packet):
        '''
        Queue tx duple on stack transmit queue

        On StackError the failure is logged, the per-class failure stat is
        bumped and the transaction removes itself instead of raising.
        '''
        try:
            self.stack.tx(packet.packed, self.remote.uid)
        except raeting.StackError as ex:
            console.terse(str(ex) + '\n')
            self.stack.incStat(self.statKey())
            self.remove(remote=self.remote, index=packet.index)
            return
        self.txPacket = packet

    def add(self, remote=None, index=None):
        '''
        Add self to remote transactions
        Defaults to self.index and self.remote when not provided.
        '''
        if not index:
            index = self.index
        if not remote:
            remote = self.remote
        remote.addTransaction(index, self)

    def remove(self, remote=None, index=None):
        '''
        Remove self from remote transactions
        Defaults to self.index and self.remote when not provided.
        '''
        if not index:
            index = self.index
        if not remote:
            remote = self.remote
        if remote:
            remote.removeTransaction(index, transaction=self)

    def statKey(self):
        '''
        Return the stat name key from class name
        (e.g. "transaction_transaction_failure" for this base class)
        '''
        return ("{0}_transaction_failure".format(self.__class__.__name__.lower()))

    def nack(self, **kwa):
        '''
        Placeholder override in sub class
        nack to terminate transaction with other side of transaction
        '''
        pass
class Initiator(Transaction):
    '''
    RAET protocol initiator transaction class.

    The local side that starts a transaction, so the remote-initiator flag
    is always forced off.
    '''
    def __init__(self, **kwa):
        '''
        Setup Transaction instance; forces rmt to False.
        '''
        kwa['rmt'] = False  # force rmt to False since local initator
        super(Initiator, self).__init__(**kwa)

    def process(self):
        '''
        Remove this transaction once its timer has expired.
        '''
        if self.timeout <= 0.0:
            return  # a timeout of zero means wait forever
        if self.timer.expired:
            self.remove()
class Correspondent(Transaction):
    '''
    RAET protocol correspondent transaction class.

    The responding side of a transaction; the remote-initiator flag is
    always forced on and the Requireds keyword arguments must be supplied.
    '''
    Requireds = ['sid', 'tid', 'rxPacket']

    def __init__(self, **kwa):
        '''
        Setup Transaction instance.

        Raises TypeError when any keyword argument listed in Requireds
        is missing.
        '''
        kwa['rmt'] = True  # force rmt to True since remote initiator
        missing = [arg for arg in self.Requireds if arg not in kwa]
        if missing:
            emsg = "Missing required keyword arguments: '{0}'".format(missing)
            raise TypeError(emsg)
        super(Correspondent, self).__init__(**kwa)
class Staler(Initiator):
    '''
    RAET protocol Staler initiator transaction class.

    Dummy initiator used to nack a correspondent packet for which no live
    initiator transaction exists. It is never added to the remote's
    transaction store, so there is nothing to remove afterwards.
    '''
    def __init__(self, **kwa):
        '''
        Setup Transaction instance.

        Raises TypeError when any of 'kind', 'sid', 'tid' or 'rxPacket'
        is missing.
        '''
        for key in ['kind', 'sid', 'tid', 'rxPacket']:
            if key not in kwa:
                emsg = "Missing required keyword arguments: '{0}'".format(key)
                raise TypeError(emsg)
        super(Staler, self).__init__(**kwa)
        self.prep()

    def prep(self):
        '''
        Prepare .txData for nack to stale
        '''
        # Mirror the stale packet: its source host/port become our
        # destination, its source estate id becomes our destination estate.
        self.txData.update(
            dh=self.rxPacket.data['sh'],  # may need for index
            dp=self.rxPacket.data['sp'],  # may need for index
            se=self.remote.nuid,
            de=self.rxPacket.data['se'],
            tk=self.kind,
            cf=self.rmt,
            bf=self.bcst,
            si=self.sid,
            ti=self.tid,
            ck=self.rxPacket.data['ck'],  # CoatKind.nada.value,
            fk=self.rxPacket.data['fk'],  # FootKind.nada.value
        )

    def nack(self):
        '''
        Send nack to stale packet from correspondent.
        This is used when a correspondent packet is received but no matching
        Initiator transaction is found. So create a dummy initiator and send
        a nack packet back. Do not add transaction so don't need to remove it.
        '''
        ha = (self.rxPacket.data['sh'], self.rxPacket.data['sp'])
        try:
            tkname = TrnsKind(self.rxPacket.data['tk'])
        except ValueError as ex:
            tkname = None
        try:
            # Fix: 'pk' is the packet kind field, so it must be converted
            # with PcktKind; the old code wrongly used TrnsKind here.
            pkname = PcktKind(self.rxPacket.data['pk'])
        except ValueError as ex:
            pkname = None
        emsg = ("Staler '{0}'. Stale transaction '{1}' packet '{2}' from '{3}' in {4} "
                "nacking...\n".format(self.stack.name, tkname, pkname, ha, self.tid))
        console.terse(emsg)
        self.stack.incStat('stale_correspondent_attempt')

        if self.rxPacket.data['se'] not in self.stack.remotes:
            emsg = "Staler '{0}'. Unknown correspondent estate id '{1}'\n".format(
                self.stack.name, self.rxPacket.data['se'])
            console.terse(emsg)
            self.stack.incStat('unknown_correspondent_uid')
            #return #maybe we should return and not respond at all in this case

        body = odict()
        packet = packeting.TxPacket(stack=self.stack,
                                    kind=PcktKind.nack.value,
                                    embody=body,
                                    data=self.txData)
        try:
            packet.pack()
        except raeting.PacketError as ex:
            console.terse(str(ex) + '\n')
            self.stack.incStat("packing_error")
            return
        # Bypass transaction machinery and enqueue directly to the wire.
        self.stack.txes.append((packet.packed, ha))
        console.terse("Staler '{0}'. Do Nack of stale correspondent {1} in {2} at {3}\n".format(
            self.stack.name, ha, self.tid, self.stack.store.stamp))
        self.stack.incStat('stale_correspondent_nack')
class Stalent(Correspondent):
    '''
    RAET protocol Stalent correspondent transaction class.

    Dummy correspondent used to nack an initiator packet that arrived with a
    stale session id. It is never added to the remote's transaction store.
    '''
    Requireds = ['kind', 'sid', 'tid', 'rxPacket']

    def __init__(self, **kwa):
        '''
        Setup Transaction instance
        '''
        super(Stalent, self).__init__(**kwa)
        self.prep()

    def prep(self):
        '''
        Prepare .txData for nack to stale
        '''
        # Mirror the stale packet: source host/port become the destination
        # and the estate ids are swapped back.
        self.txData.update(
            dh=self.rxPacket.data['sh'],  # may need for index
            dp=self.rxPacket.data['sp'],  # may need for index
            se=self.rxPacket.data['de'],
            de=self.rxPacket.data['se'],
            tk=self.kind,
            cf=self.rmt,
            bf=self.bcst,
            si=self.sid,
            ti=self.tid,
            ck=self.rxPacket.data['ck'],  # CoatKind.nada.value
            fk=self.rxPacket.data['fk'],  # FootKind.nada.value
        )

    def nack(self, kind=PcktKind.nack.value):
        '''
        Send nack to stale packet from initiator.
        This is used when a initiator packet is received but with a stale session id
        So create a dummy correspondent and send a nack packet back.
        Do not add transaction so don't need to remove it.

        kind: packet kind of the refusal (renew, refuse, reject or nack).
        '''
        ha = (self.rxPacket.data['sh'], self.rxPacket.data['sp'])
        try:
            tkname = TrnsKind(self.rxPacket.data['tk'])
        except ValueError as ex:
            tkname = None
        try:
            # Fix: 'pk' is the packet kind field, so it must be converted
            # with PcktKind; the old code wrongly used TrnsKind here.
            pkname = PcktKind(self.rxPacket.data['pk'])
        except ValueError as ex:
            pkname = None
        emsg = ("Stalent '{0}'. Stale transaction '{1}' packet '{2}' from '{3}' in {4} "
                "nacking ...\n".format(self.stack.name, tkname, pkname, ha, self.tid))
        console.terse(emsg)
        self.stack.incStat('stale_initiator_attempt')

        if self.rxPacket.data['se'] not in self.stack.remotes:
            emsg = "Stalent '{0}'. Unknown initiator estate id '{1}'\n".format(
                self.stack.name,
                self.rxPacket.data['se'])
            console.terse(emsg)
            self.stack.incStat('unknown_initiator_uid')
            #return #maybe we should return and not respond at all in this case

        body = odict()
        packet = packeting.TxPacket(stack=self.stack,
                                    kind=kind,
                                    embody=body,
                                    data=self.txData)
        try:
            packet.pack()
        except raeting.PacketError as ex:
            console.terse(str(ex) + '\n')
            self.stack.incStat("packing_error")
            return

        if kind == PcktKind.renew:
            console.terse("Stalent '{0}'. Do Renew of {1} in {2} at {3}\n".format(
                self.stack.name, ha, self.tid, self.stack.store.stamp))
        elif kind == PcktKind.refuse:
            console.terse("Stalent '{0}'. Do Refuse of {1} in {2} at {3}\n".format(
                self.stack.name, ha, self.tid, self.stack.store.stamp))
        elif kind == PcktKind.reject:
            console.terse("Stalent '{0}'. Do Reject of {1} in {2} at {3}\n".format(
                self.stack.name, ha, self.tid, self.stack.store.stamp))
        elif kind == PcktKind.nack:
            console.terse("Stalent '{0}'. Do Nack of {1} in {2} at {3}\n".format(
                self.stack.name, ha, self.tid, self.stack.store.stamp))
        else:
            # Fix: the replacement field '{3)' was malformed and would raise
            # ValueError the first time this branch ran.
            console.terse("Stalent '{0}'. Invalid nack kind {1}. Do Nack of {2} anyway "
                          " to {3} at {4}\n".format(self.stack.name,
                                                    kind,
                                                    ha,
                                                    self.tid,
                                                    self.stack.store.stamp))
            # Fix: was 'kind == PcktKind.nack', a no-op comparison; normalize
            # the invalid kind to a plain nack as intended.
            kind = PcktKind.nack.value
        self.stack.txes.append((packet.packed, ha))
        self.stack.incStat('stale_initiator_nack')
class Joiner(Initiator):
'''
RAET protocol Joiner Initiator class Dual of Joinent
Joiner must always add new remote since always must anticipate response to
request.
'''
RedoTimeoutMin = 1.0 # initial timeout
RedoTimeoutMax = 4.0 # max timeout
PendRedoTimeout = 60.0 # Redo timeout when pended
def __init__(self,
redoTimeoutMin=None,
redoTimeoutMax=None,
pendRedoTimeout=None,
cascade=False,
renewal=False,
**kwa):
'''
Setup Transaction instance
'''
kwa['kind'] = TrnsKind.join.value
super(Joiner, self).__init__(**kwa)
self.cascade = cascade
self.redoTimeoutMax = redoTimeoutMax or self.RedoTimeoutMax
self.redoTimeoutMin = redoTimeoutMin or self.RedoTimeoutMin
self.redoTimer = StoreTimer(self.stack.store,
duration=self.redoTimeoutMin)
self.pendRedoTimeout = pendRedoTimeout or self.PendRedoTimeout
self.sid = 0 #always 0 for join
self.tid = self.remote.nextTid()
# fuid is assigned during join but want to preserve vacuousness for remove
self.vacuous = (self.remote.fuid == 0)
self.renewal = renewal # is current join a renew, vacuous rejoin
self.pended = False # Farside Correspondent has pended remote acceptance
self.prep()
# don't dump remote yet since its ephemeral until we join and get valid uid
def transmit(self, packet):
'''
Augment transmit with restart of redo timer
'''
super(Joiner, self).transmit(packet)
self.redoTimer.restart()
def add(self, remote=None, index=None):
'''
Augment with add self.remote to stack.joinees if vacuous
'''
super(Joiner, self).add(remote=remote, index=index)
# self.remote is now assigned
if self.vacuous: # vacuous
self.stack.joinees[self.remote.ha] = self.remote
def remove(self, remote=None, index=None):
'''
Remove self from stack transactions
'''
super(Joiner, self).remove(remote=remote, index=index)
# self.remote is now assigned
if self.vacuous: # vacuous
if self.remote.ha in self.stack.joinees and not self.remote.transactions:
del self.stack.joinees[self.remote.ha]
def receive(self, packet):
"""
Process received packet belonging to this transaction
| |
to compute the output shape.
Input shape:
This depends on the `layout` parameter. Input is 4D array of shape
(batch_size, channels, height, width) if `layout` is `NCHW`.
Output shape:
This depends on the `layout` parameter. Output is 4D array of shape
(batch_size, channels, out_height, out_width) if `layout` is `NCHW`.
out_height and out_width are calculated as::
out_height = floor((height+2*padding[0]-pool_size[0])/strides[0])+1
out_width = floor((width+2*padding[1]-pool_size[1])/strides[1])+1
When `ceil_mode` is `True`, ceil will be used instead of floor in this
equation.
"""
def __init__(self, pool_size=(2, 2), strides=None, padding=0, layout='NCHW',
             ceil_mode=False, **kwargs):
    """Create a 2D max pooling layer; see the class docstring for details."""
    assert layout == 'NCHW', "Only supports NCHW layout for now"
    if isinstance(pool_size, numeric_types):
        # A scalar window size applies to both spatial axes.
        pool_size = (pool_size, pool_size)
    assert len(pool_size) == 2, "pool_size must be a number or a list of 2 ints"
    args = (pool_size, strides, padding, ceil_mode, False, 'max')
    super(MaxPool2D, self).__init__(*args, **kwargs)
class MaxPool3D(_Pooling):
    """Max pooling operation for 3D data (spatial or spatio-temporal).

    Parameters
    ----------
    pool_size: int or list/tuple of 3 ints,
        Size of the max pooling windows.
    strides: int, list/tuple of 3 ints, or None.
        Factor by which to downscale. E.g. 2 will halve the input size.
        Defaults to `pool_size` when `None`.
    padding: int or list/tuple of 3 ints,
        Implicit zero padding added on both sides of the 'D', 'H' and 'W'
        dimensions.
    ceil_mode : bool, default False
        When `True`, use ceil instead of floor to compute the output shape.
    layout : str, default 'NCDHW'
        Dimension ordering of data and weight; only 'NCDHW' is supported for
        now ('N' batch, 'C' channel, 'D' depth, 'H' height, 'W' width).

    Input shape:
        5D array of shape (batch_size, channels, depth, height, width) for
        the 'NCDHW' layout.
    Output shape:
        5D array of shape (batch_size, channels, out_depth, out_height,
        out_width) where::
            out_depth = floor((depth+2*padding[0]-pool_size[0])/strides[0])+1
            out_height = floor((height+2*padding[1]-pool_size[1])/strides[1])+1
            out_width = floor((width+2*padding[2]-pool_size[2])/strides[2])+1
        with ceil replacing floor when `ceil_mode` is `True`.
    """
    def __init__(self, pool_size=(2, 2, 2), strides=None, padding=0,
                 ceil_mode=False, layout='NCDHW', **kwargs):
        assert layout == 'NCDHW', "Only supports NCDHW layout for now"
        if isinstance(pool_size, numeric_types):
            # A scalar window size applies to all three spatial axes.
            pool_size = (pool_size, pool_size, pool_size)
        assert len(pool_size) == 3, "pool_size must be a number or a list of 3 ints"
        args = (pool_size, strides, padding, ceil_mode, False, 'max')
        super(MaxPool3D, self).__init__(*args, **kwargs)
class AvgPool1D(_Pooling):
    """Average pooling operation for temporal data.

    Parameters
    ----------
    pool_size: int
        Size of the pooling window.
    strides: int, or None
        Factor by which to downscale. E.g. 2 will halve the input size.
        Defaults to `pool_size` when `None`.
    padding: int
        Implicit zero padding added on both sides of the 'W' dimension.
    layout : str, default 'NCW'
        Dimension ordering of data and weight; only 'NCW' is supported for
        now ('N' batch, 'C' channel, 'W' width/time).
    ceil_mode : bool, default False
        When `True`, use ceil instead of floor to compute the output shape.

    Input shape:
        3D array of shape (batch_size, channels, width) for the 'NCW' layout.
    Output shape:
        3D array of shape (batch_size, channels, out_width) where::
            out_width = floor((width+2*padding-pool_size)/strides)+1
        with ceil replacing floor when `ceil_mode` is `True`.
    """
    def __init__(self, pool_size=2, strides=None, padding=0, layout='NCW',
                 ceil_mode=False, **kwargs):
        assert layout == 'NCW', "Only supports NCW layout for now"
        if isinstance(pool_size, numeric_types):
            # Wrap a scalar window size into a 1-tuple.
            pool_size = (pool_size,)
        assert len(pool_size) == 1, "pool_size must be a number or a list of 1 ints"
        args = (pool_size, strides, padding, ceil_mode, False, 'avg')
        super(AvgPool1D, self).__init__(*args, **kwargs)
class AvgPool2D(_Pooling):
    """Average pooling operation for spatial data.

    Parameters
    ----------
    pool_size: int or list/tuple of 2 ints,
        Size of the pooling windows.
    strides: int, list/tuple of 2 ints, or None.
        Factor by which to downscale. E.g. 2 will halve the input size.
        Defaults to `pool_size` when `None`.
    padding: int or list/tuple of 2 ints,
        Implicit zero padding added on both sides of the 'H' and 'W'
        dimensions.
    ceil_mode : bool, default False
        When `True`, use ceil instead of floor to compute the output shape.
    layout : str, default 'NCHW'
        Dimension ordering of data and weight; only 'NCHW' is supported for
        now ('N' batch, 'C' channel, 'H' height, 'W' width).

    Input shape:
        4D array of shape (batch_size, channels, height, width) for the
        'NCHW' layout.
    Output shape:
        4D array of shape (batch_size, channels, out_height, out_width)
        where::
            out_height = floor((height+2*padding[0]-pool_size[0])/strides[0])+1
            out_width = floor((width+2*padding[1]-pool_size[1])/strides[1])+1
        with ceil replacing floor when `ceil_mode` is `True`.
    """
    def __init__(self, pool_size=(2, 2), strides=None, padding=0,
                 ceil_mode=False, layout='NCHW', **kwargs):
        assert layout == 'NCHW', "Only supports NCHW layout for now"
        if isinstance(pool_size, numeric_types):
            # A scalar window size applies to both spatial axes.
            pool_size = (pool_size, pool_size)
        assert len(pool_size) == 2, "pool_size must be a number or a list of 2 ints"
        args = (pool_size, strides, padding, ceil_mode, False, 'avg')
        super(AvgPool2D, self).__init__(*args, **kwargs)
class AvgPool3D(_Pooling):
    """Average pooling operation for 3D data (spatial or spatio-temporal).

    Parameters
    ----------
    pool_size: int or list/tuple of 3 ints,
        Size of the pooling windows.
    strides: int, list/tuple of 3 ints, or None.
        Factor by which to downscale. E.g. 2 will halve the input size.
        Defaults to `pool_size` when `None`.
    padding: int or list/tuple of 3 ints,
        Implicit zero padding added on both sides of the 'D', 'H' and 'W'
        dimensions.
    ceil_mode : bool, default False
        When `True`, use ceil instead of floor to compute the output shape.
    layout : str, default 'NCDHW'
        Dimension ordering of data and weight; only 'NCDHW' is supported for
        now ('N' batch, 'C' channel, 'D' depth, 'H' height, 'W' width).

    Input shape:
        5D array of shape (batch_size, channels, depth, height, width) for
        the 'NCDHW' layout.
    Output shape:
        5D array of shape (batch_size, channels, out_depth, out_height,
        out_width) where::
            out_depth = floor((depth+2*padding[0]-pool_size[0])/strides[0])+1
            out_height = floor((height+2*padding[1]-pool_size[1])/strides[1])+1
            out_width = floor((width+2*padding[2]-pool_size[2])/strides[2])+1
        with ceil replacing floor when `ceil_mode` is `True`.
    """
    def __init__(self, pool_size=(2, 2, 2), strides=None, padding=0,
                 ceil_mode=False, layout='NCDHW', **kwargs):
        assert layout == 'NCDHW', "Only supports NCDHW layout for now"
        if isinstance(pool_size, numeric_types):
            # A scalar window size applies to all three spatial axes.
            pool_size = (pool_size, pool_size, pool_size)
        assert len(pool_size) == 3, "pool_size must be a number or a list of 3 ints"
        args = (pool_size, strides, padding, ceil_mode, False, 'avg')
        super(AvgPool3D, self).__init__(*args, **kwargs)
class GlobalMaxPool1D(_Pooling):
    """Global max pooling operation for temporal data.

    Collapses the whole 'W' axis of a 3D 'NCW' input to length one by
    taking the maximum over it.
    """
    def __init__(self, layout='NCW', **kwargs):
        assert layout == 'NCW', "Only supports NCW layout for now"
        super(GlobalMaxPool1D, self).__init__(
            (1,), None, 0, True, True, 'max', **kwargs)
class GlobalMaxPool2D(_Pooling):
    """Global max pooling operation for spatial data.

    Collapses the 'H' and 'W' axes of a 4D 'NCHW' input to 1x1 by taking
    the maximum over them.
    """
    def __init__(self, layout='NCHW', **kwargs):
        # Fix: the assertion message used to say 'NCW' (copy-paste from the
        # 1D variant) even though the accepted layout is 'NCHW'.
        assert layout == 'NCHW', "Only supports NCHW layout for now"
        super(GlobalMaxPool2D, self).__init__(
            (1, 1), None, 0, True, True, 'max', **kwargs)
class GlobalMaxPool3D(_Pooling):
    """Global max pooling operation for 3D data.

    Collapses the 'D', 'H' and 'W' axes of a 5D 'NCDHW' input to 1x1x1 by
    taking the maximum over them.
    """
    def __init__(self, layout='NCDHW', **kwargs):
        # Fix: the assertion message used to say 'NCW' (copy-paste from the
        # 1D variant) even though the accepted layout is 'NCDHW'.
        assert layout == 'NCDHW', "Only supports NCDHW layout for now"
        super(GlobalMaxPool3D, self).__init__(
            (1, 1, 1), None, 0, True, True, 'max', **kwargs)
class GlobalAvgPool1D(_Pooling):
    """Global average pooling operation for temporal data.

    Collapses the whole 'W' axis of a 3D 'NCW' input to length one by
    averaging over it.
    """
    def __init__(self, layout='NCW', **kwargs):
        assert layout == 'NCW', "Only supports NCW layout for now"
        super(GlobalAvgPool1D, self).__init__(
            (1,), None, 0, True, True, 'avg', **kwargs)
class GlobalAvgPool2D(_Pooling):
"""Global average pooling operation for spatial data."""
def __init__(self, layout='NCHW', **kwargs):
assert layout == 'NCHW', "Only supports NCW layout for now"
super(GlobalAvgPool2D, self).__init__(
(1, 1), None, 0, True, | |
def table_slice_2_cc1(table, r2, r_max, c1, c2):
    """
    Function to cut a correct slice out of array for CC1 in _find_cc1_cc2().
    Cuts out the row header: the section below row ``r2`` spanning columns
    ``c1`` to ``c2`` inclusive. Returns ``None`` (after logging) when no
    valid section is defined.
    """
    # One more row and column index than in the published pseudocode is
    # needed, since python's a:b slicing doesn't include b.
    # Contrary to the published pseudocode, the correct range is
    # [r2:r_max, c1:c2] and not [r2+1:c2, c1+1:r_max].
    single_row = (r2 + 1 == r_max)
    single_col = (c1 == c2)
    if single_row:
        if single_col:
            return table[r2 + 1, c1]
        return table[r2 + 1, c1: c2 + 1]
    if not single_col:
        return table[r2 + 1: r_max + 1, c1: c2 + 1]
    if single_col:
        return table[r2 + 1: r_max + 1, c1]
    log.critical(
        "Not defined section 2 for cc1, r2+1= {}, c2= {}, c1+1= {}, r_max= {}".format(
            r2 + 1, c2, c1 + 1, r_max))
    return None
# MAIN MIPS algorithm
# Locate candidate MIPs by finding the minimum indexing headers:
# This is significantly altered compared to the published pseudocode, which is flawed.
# The pseudocode clearly does not return cc2 if the column has not been changed and it doesn't
# discriminate between duplicate rows in the row header vs duplicate columns in the column header
while c2 < c_max and r2 >= r1:
log.debug("Entering loop: r_max= {}, c_max= {}, c1= {}, c2= {}, r1= {}, r2= {}, cc2= {}"
.format(r_max, c_max, c1, c2, r1, r2, cc2))
temp_section_1, temp_section_2 = table_slice_cc2(array, r2, r_max, c1, c2)
log.debug("temp_section_1:\n{}".format(temp_section_1))
log.debug("temp_section_2:\n{}".format(temp_section_2))
log.debug("duplicate_rows= {}, duplicate_columns= {}".
format(duplicate_rows(temp_section_1), duplicate_rows(temp_section_2)))
if not duplicate_rows(temp_section_1) and not duplicate_columns(temp_section_2):
if table_object.configs['use_max_data_area']:
data_area = (r_max - r2) * (c_max - c2)
log.debug("The data area of the new candidate C2= {} is *1: {}".format((r2, c2), data_area))
log.debug("Data area:\n{}".format(array[r2 + 1:r_max + 1, c2 + 1:c_max + 1]))
if data_area >= max_area:
max_area = data_area
cc2 = (r2, c2)
log.debug("CC2= {}".format(cc2))
r2 = r2 - 1
else:
cc2 = (r2, c2)
log.debug("CC2= {}".format(cc2))
r2 = r2 - 1
elif duplicate_rows(temp_section_1) and not duplicate_columns(temp_section_2):
c2 = c2 + 1
if table_object.configs['use_max_data_area']:
data_area = (r_max - r2) * (c_max - c2)
log.debug("The data area of the new candidate C2= {} is *2: {}".format((r2, c2), data_area))
log.debug("Data area:\n{}".format(array[r2 + 1:r_max + 1, c2 + 1:c_max + 1]))
if data_area >= max_area:
max_area = data_area
cc2 = (r2, c2)
log.debug("CC2= {}".format(cc2))
else:
cc2 = (r2, c2)
log.debug("CC2= {}".format(cc2))
elif duplicate_rows(temp_section_1) and duplicate_columns(temp_section_2):
c2 = c2 + 1
r2 = r2 + 1
if table_object.configs['use_max_data_area']:
data_area = (r_max - r2) * (c_max - c2)
log.debug("The data area of the new candidate C2= {} is *3: {}".format((r2, c2), data_area))
log.debug("Data area:\n{}".format(array[r2 + 1:r_max + 1, c2 + 1:c_max + 1]))
if data_area >= max_area:
max_area = data_area
cc2 = (r2, c2)
log.debug("CC2= {}".format(cc2))
else:
cc2 = (r2, c2)
# if none of those above is satisfied, just finish the loop
else:
r2 = r2 + 1
if table_object.configs['use_max_data_area']:
data_area = (r_max - r2) * (c_max - c2)
log.debug("The data area of the new candidate C2= {} is *4: {}".format((r2, c2), data_area))
log.debug("Data area:\n{}".format(array[r2 + 1:r_max + 1, c2 + 1:c_max + 1]))
if data_area >= max_area:
max_area = data_area
cc2 = (r2, c2)
log.debug("CC2= {}".format(cc2))
break
else:
cc2 = (r2, c2)
break
log.debug(
"Ended loop with: r_max= {}, c_max= {}, c1= {}, c2= {}, r1= {}, r2= {}, cc2= {}\n\n\n\n".format(r_max,
c_max, c1,
c2, r1, r2,
cc2))
# re-initialization of r2 and c2 from cc2; missing in the pseudocode
r2 = cc2[0]
c2 = cc2[1]
# Locate CC1 at intersection of the top row and the leftmost column necessary for indexing:
log.debug("Potentially duplicate columns:\n{}".format(table_slice_1_cc1(array, r1, r2, c2, c_max)))
while not duplicate_columns(table_slice_1_cc1(array, r1, r2, c2, c_max)) and r1 <= r2:
log.debug("Potentially duplicate columns:\n{}".format(table_slice_1_cc1(array, r1, r2, c2, c_max)))
log.debug("Duplicate columns= {}".format(duplicate_columns(table_slice_1_cc1(array, r1, r2, c2, c_max))))
r1 = r1 + 1
log.debug("r1= {}".format(r1))
log.debug("Potentially duplicate rows:\n{}".format(table_slice_2_cc1(array, r2, r_max, c1, c2)))
while not duplicate_rows(table_slice_2_cc1(array, r2, r_max, c1, c2)) and c1 <= c2:
log.debug("Potentially duplicate rows:\n{}".format(table_slice_2_cc1(array, r2, r_max, c1, c2)))
log.debug("Duplicate rows= {}".format(duplicate_rows(table_slice_2_cc1(array, r2, r_max, c1, c2))))
c1 = c1 + 1
log.debug("c1= {}".format(c1))
# final cc1 is (r1-1,c1-1), because the last run of the while loops doesn't count
# a problem could arise if the code never stepped through the while loops,
# returning a cc1 with a negative index.
# however, this should never happen since the final headers CANNOT have duplicate rows/columns,
# by definition of cc2.
# hence, the assertions:
try:
assert not duplicate_columns(table_slice_1_cc1(array, r1=0, r2=cc2[0], c2=cc2[1], c_max=c_max))
assert not duplicate_rows(table_slice_2_cc1(array, r2=cc2[0], r_max=r_max, c1=0, c2=cc2[1]))
assert r1 >= 0 and c1 >= 0
cc1 = (r1 - 1, c1 - 1)
except AssertionError:
raise MIPSError("Error in _find_cc1_cc2")
# provision for using the uppermost row possible for cc1, if titles are turned of
if not table_object.configs['use_title_row']:
if cc1[0] != 0:
log.debug("METHOD. Title row removed, cc1 was shifted from {} to {}".format(cc1, (0, cc1[1])))
cc1 = (0, cc1[1])
table_object.history._title_row_removed = True
else:
table_object.history._title_row_removed = False
# provision for using only the first column of the table as row header
if table_object.configs['row_header'] is not None:
row_header = table_object.configs['row_header']
assert isinstance(row_header, int)
if table_object.history.prefixed_rows:
row_header += 1
left = min(cc1[1], row_header)
cc1 = (cc1[0], left)
cc2 = (cc2[0], row_header)
# provision for using only the first row of the table as column header
if table_object.configs['col_header'] is not None:
col_header = table_object.configs['col_header']
assert isinstance(col_header, int)
if table_object.history.prefixing_performed and not table_object.history.prefixed_rows:
col_header += 1
top = min(cc1[0], col_header)
cc1 = (top, cc1[1])
cc2 = (col_header, cc2[1])
return cc1, cc2
def find_cc3(table_object, cc2):
    """
    Searches for critical cell `CC3`, as the leftmost cell of the first filled row of the data region.

    .. rubric:: Comment on implementation

    There are two options on how to implement the search for `CC3`:

    1. With the possibility of `Notes` rows directly below the header (default):
        * the first half filled row below the header is considered as the start of the data region, just like for the `CC4` cell
        * implemented by Embley et. al.
    2. Without the possibility of `Notes` rows directly below the header:
        * the first row below the header is considered as the start of the data region
        * for scientific tables it might be more common that the first data row only has a single entry
        * this can be chosen my commenting/uncommenting the code within this function

    :param table_object: Input Table object
    :type table_object: ~tabledataextractor.table.table.Table
    :param cc2: Tuple, position of `CC2` cell found with find_cc1_cc2()
    :type cc2: (int,int)
    :return: cc3 tuple (row_index, column_index) of the first data cell
    :raises MIPSError: when no row below the header is at least half filled
    """
    # OPTION 1
    # searching from the top of table for first half-full row, starting with first row below the header:
    n_rows = len(table_object.pre_cleaned_table[cc2[0] + 1:])
    log.debug("n_rows= {}".format(n_rows))
    for row_index in range(cc2[0] + 1, cc2[0] + 1 + n_rows, 1):
        n_full = 0
        # Only cells to the right of the row header (column cc2[1]) count.
        n_columns = len(table_object.pre_cleaned_table[row_index, cc2[1] + 1:])
        log.debug("n_columns= {}".format(n_columns))
        for column_index in range(cc2[1] + 1, cc2[1] + 1 + n_columns, 1):
            empty = table_object.pre_cleaned_table_empty[row_index, column_index]
            if not empty:
                n_full += 1
        # A row counts as "data" once at least half of its cells are filled.
        if n_full >= int(n_columns / 2):
            return row_index, cc2[1] + 1
    raise MIPSError("No CC3 critical cell found! No data region defined.")
    # OPTION 2
    # return (cc2[0]+1,cc2[1]+1)
def find_title_row(table_object):
    """
    Searches for the topmost non-empty row.

    :param table_object: Input Table object
    :type table_object: ~tabledataextractor.table.table.Table
    :return: int index of the first row containing any non-empty cell
    """
    for index, empty_cells in enumerate(table_object.pre_cleaned_table_empty):
        row_has_content = not empty_cells.all()
        if row_has_content:
            return index
def find_note_cells(table_object, labels_table):
    """
    Searches for all non-empty cells that have not been labelled differently.

    Yields ``(row_index, column_index)`` for every cell whose label is still
    the placeholder '/' while the underlying table cell is not empty.

    :param table_object: Input Table object
    :type table_object: ~tabledataextractor.table.table.Table
    :param labels_table: table that holds all the labels
    :type labels_table: Numpy array
    :return: Tuple
    """
    for row_index, label_row in enumerate(labels_table):
        for column_index, label in enumerate(label_row):
            unlabelled = (label == '/')
            if unlabelled and not table_object.pre_cleaned_table_empty[row_index, column_index]:
                yield row_index, column_index
def prefix_duplicate_labels(table_object, array):
"""
Prefixes duplicate labels | |
from datetime import timedelta
from astropy import units as u
import numpy as np
from sunpy.time import parse_time
def get_sky_position(time, offset):
    """Code for converting solar offsets to pointing position.

    Parameters
    ----------
    time: Date that is parsable by sunpy.time.parse_time()
        i.e.,
        time='2016-07-26T19:53:15.00'

    offset: Offset from the center of the Sun. Must have units from astropy:
        i.e.: offset = np.array([1000, 150]) * u.arcsec

    Returns
    ----------
    sky_position: Two-element array giving the [RA, Dec] coordinates of the
        target location, in degrees (J2000 epoch).

    Notes
    ----------
    Syntax:

    sky_position = get_sky_position(time, offset)

    The offset is assumed to be [X, Y] with +X toward solar west and +Y
    toward solar north; the sign flip below maps +X(West) to -RA(East).
    """
    from astropy.coordinates import get_sun
    from astropy.time import Time
    # Replaced with newer sunpy v1 function
    # from sunpy import sun
    from sunpy.coordinates import sun

    # Convert the date into something that's usable by astropy.
    start_date = parse_time(time)
    astro_time = Time(start_date)

    # Use astropy get_sun for Sun sky position.
    # sunpy has a similar function, but it may be giving a different
    # epoch for the RA and dec. We need them in J2000 RA and dec.
    astro_sun_pos = get_sun(astro_time)

    # Get the solar north pole angle. cgs --> radians
    # Update for sunpy v1.0+
    # sun_np=sun.solar_north(t=time).cgs
    sun_np=sun.P(time).cgs

    # Get the center of the Sun, and assign it degrees.
    # Doing it this way is necessary to do the vector math below.
    sun_pos = np.array([astro_sun_pos.ra.deg, astro_sun_pos.dec.deg])* u.deg

    # Rotation matrix for a counter-clockwise rotation since we're going
    # back to celestial north from solar north
    rotMatrix = np.array([[np.cos(sun_np), np.sin(sun_np)],
                          [-np.sin(sun_np), np.cos(sun_np)]])

    # Project the offset onto the Sun
    delta_offset = np.dot(offset, rotMatrix)

    # Scale to RA based on the declination.
    delta_offset = delta_offset * np.array([1. / np.cos(sun_pos[1]), 1.])

    # Account for the fact that +Ra == East and we have defined +X = West
    delta_offset = delta_offset * [-1.0, 1.0]

    # Apply the offset and return the sky position.
    sky_position = sun_pos + delta_offset

    return sky_position
def get_skyfield_position(time, offset, load_path=None, parallax_correction=False):
    """Code for converting solar coordinates to astrometric (J200) RA/Dec coordinates.

    Parameters
    ----------
    time: Date that is parsable by sunpy.time.parse_time()

    i.e.,

    time='2016-07-26T19:53:15.00'

    offset: Offset from the center of the Sun. Must have units from astropy:

    i.e.: offset = np.array([1000, 150]) * u.arcsec

    load_path (optional): Relative path from currently location to store bsp files

    parallax_correction: Use the NuSTAR TLE to correct for orbital parallax

    Returns
    ----------
    sky_position: Two-element array giving the [RA, Dec] coordinates of the
    target location. Note this is given in astrometric (J2000) RA/Dec, which is what
    we need for the NuSTAR planning system.

    Notes
    ----------
    Syntax:

    skyfield_position = get_skyfield_position(time, offset)
    """
    from astropy.time import Time
    # Replaced with newer sunpy v1 function
    # from sunpy import sun
    from sunpy.coordinates import sun
    from nustar_pysolar.utils import skyfield_ephem

    start_date = parse_time(time)
    utc = Time(start_date)

    # Observer is either the geocenter or NuSTAR itself, depending on
    # whether the parallax correction is requested.
    observer, sunephem, ts = skyfield_ephem(load_path=load_path,
                                            parallax_correction=parallax_correction,
                                            utc=utc)

    tcheck = ts.from_astropy(utc)
    # Apparent geocentric (or spacecraft-centric) solar position.
    geocentric = observer.at(tcheck).observe(sunephem)
    this_ra_geo, this_dec_geo, dist = geocentric.radec()

    # Get the solar north pole angle. cgs --> radians
    # sun_np = sunpy.sun.solar_north(t=time).cgs
    # Update for sunpy v1.0+
    sun_np=sun.P(time).cgs

    # Get the center of the Sun, and assign it degrees.
    # Doing it this way is necessary to do the vector math below.
    sun_pos = np.array([this_ra_geo.to(u.deg).value, this_dec_geo.to(u.deg).value])*u.deg

    # Rotation matrix for a counter-clockwise rotation since we're going
    # back to celestial north from solar north
    rotMatrix = np.array([[np.cos(sun_np), np.sin(sun_np)],
                          [-np.sin(sun_np), np.cos(sun_np)]])

    # Project the offset onto the Sun
    delta_offset = np.dot(offset, rotMatrix)

    # Scale to RA based on the declination.
    delta_offset = delta_offset * np.array([1. / np.cos(sun_pos[1]), 1.])

    # Account for the fact that +Ra == East and we have defined +X = West
    delta_offset = delta_offset * [-1.0, 1.0]

    # Apply the offset and return the sky position.
    sky_position = sun_pos + delta_offset

    return sky_position
def get_nustar_roll(time, angle):
    """Code to determine the NuSTAR roll angle for a given field-of-view on the
    Sun for a given time.

    Parameters
    ----------
    time: Date that is parsable by sunpy.time.parse_time()

    i.e.

    time='2016-07-26T19:53:15.00'

    angle: Desired roll offset from solar north in degrees.

    For a "square" field of view, use angle=0 / 90 / 180 / 270 to have DET0
    at the NE / SE / SW / NW corners of a square field of view.

    For a "diamond" with DET0 to the south, use angle = 45.

    Returns
    ----------
    nustar_roll: NuSTAR PA angle with respect to celestial north.
    """
    # Replaced with newer sunpy v1 function
    # from sunpy import sun
    from sunpy.coordinates import sun

    # Solar north pole angle (P angle) in degrees.
    solar_north = sun.P(time).deg * u.deg

    # Wrap the combined angle into [0, 360) degrees.
    return np.mod(solar_north + angle, 360 * u.deg)
def _parse_timestamp(tstamp):
    """Convenience function for turning the SOC timestamp into a datetime object.

    Parses strings of the form ``YYYY/DDD HH:MM:SS`` (day-of-year format):
    the result is Jan 1 of the given year plus the day/clock offset.

    Parameters
    ----------
    tstamp: Timestamp string formatted as ``year/day hh:mm:ss``.

    Returns
    -------
    The parsed time (Jan 1 anchor + timedelta offset).
    """
    date_fields = tstamp.split('/')
    year_str = date_fields[0].strip()
    day, clock = date_fields[1].split()
    # Anchor at Jan 1 of the requested year; the day-of-year and the clock
    # fields are applied as a timedelta offset below.
    year_start = parse_time(year_str + '-01-01T00:00:00')
    # Renamed from hr/min/sec: `min` shadowed the builtin.
    hours, minutes, seconds = clock.split(':')
    # timedelta(days, seconds, microseconds, milliseconds, minutes, hours);
    # day-of-year is 1-based, hence the -1.
    dt = timedelta(int(day) - 1, int(seconds), 0, 0, int(minutes), int(hours))
    return year_start + dt
def _parse_SOC_timestamp(tstamp):
    """Convenience function for turning the timestamp into a datetime object.

    Parses colon-separated ``year:day:hh:mm:ss`` strings (SOC format):
    the result is Jan 1 of the given year plus the day/clock offset.

    Parameters
    ----------
    tstamp: Timestamp string formatted as ``year:day:hh:mm:ss``.

    Returns
    -------
    The parsed time (Jan 1 anchor + timedelta offset).
    """
    fields = tstamp.split(':')
    year_str = fields[0]
    day = fields[1]
    # Renamed from hr/min/sec: `min` shadowed the builtin.
    hours = fields[2]
    minutes = fields[3]
    seconds = fields[4]
    # Anchor at Jan 1 of the requested year and offset from there.
    year_start = parse_time(year_str.strip() + '-01-01T00:00:00')
    # timedelta(days, seconds, microseconds, milliseconds, minutes, hours);
    # day-of-year is 1-based, hence the -1.
    dt = timedelta(int(day) - 1, int(seconds), 0, 0, int(minutes), int(hours))
    return year_start + dt
def parse_occultations(infile):
    """Parse the shadow analysis file to determine the 'in Sun' times.

    Parameters
    ----------
    infile: Input file to be parsed.

    Returns
    ----------
    Returns a list of [ [start, stop], [start stop] ] times where start means
    you egress from Earth shadow into the sunlight, while stop means you
    re-enter Earth shadow.

    Notes
    ---------
    """
    all_pairs = []
    start = 0
    # Context manager guarantees the file is closed even if a timestamp
    # fails to parse partway through the loop.
    with open(infile) as f:
        for line in f:
            # Little parser here to find the right place to start reading in...
            if line.find("Shadow Begin") != -1:
                start = start + 1
            # Skips over additional lines of whitespace.
            if start == 0:
                continue
            if start < 3:
                start += 1
                continue
            # Get the first date string:
            fields = line.split('-')
            dtfirst = _parse_timestamp(fields[0])
            second = (fields[1].split('UTC'))[0].strip()
            dtsecond = _parse_timestamp(second)
            # Since the file actually gives the start/stop times of going into
            # earthshadow, we actually want the "In Sun" times, which is the egress
            # from earthshadow and the entry into the next earthshadow.
            # Note that this skips the first row.
            if start == 3:
                start += 1
            else:
                all_pairs.append([last, dtfirst])
            # Store the last entry to add in the next time around...
            last = dtsecond
    return all_pairs
def sunlight_periods(infile, tstart, tend):
    """Return the periods when NuSTAR is in Sunlight in the given timerange.

    Parameters
    ----------
    tstart, tend: ISO formatted times or something else that
        sunpy.time.parse_time() can read.
        i.e. tstart='2017-03-11T23:09:10'
    infile: Input file to be parsed. This should the value returned by
        nustar_pysolar.download_occultation_times()

    Returns
    ----------
    Returns a list of [ [start, stop], [start stop] ] times where start means
    you egress from Earth shadow into the sunlight, while stop means you
    re-enter Earth shadow.
    The list has been filtered to only include those epochs that span the given
    time range.
    Returns -1 (with a printed message) if the file is missing or no epochs
    fall in range, matching the original error contract.

    Notes
    ---------
    """
    import os.path
    if not os.path.isfile(infile):
        print('Error in nustar_pysolar.sunlight_periods.')
        print('Input file: ' + infile + ' does not exist.')
        return -1
    all_pairs = parse_occultations(infile)
    checkstart = parse_time(tstart)
    checkend = parse_time(tend)
    in_range = []
    # Renamed from `set`, which shadowed the builtin; the unused
    # dtmin/dtmax deltas were removed.
    in_window = 0
    for pair in all_pairs:
        # Latch on once an epoch ends after the requested start time.
        if pair[1] > checkstart:
            in_window = 1
        if in_window == 0:
            continue
        # Stop once epochs end beyond the requested end time.
        if pair[1] > checkend:
            break
        in_range.append(pair)
    if len(in_range) == 0:
        print('Error in function: ' + sunlight_periods.__name__)
        print('No dates found in range. Pick a different occultation file.')
        return -1
    else:
        return in_range
def make_mosaic(orbit, outfile='mosaic.txt', write_output=False, make_regions=False,
reg_pref='testbox', extra_roll=0.*u.deg, write_sun=False):
'''
Code to make a mosaic for a 5x5 tiled array on the Sun.
Input:
tstart = '2018-05-28T15:37:00'
tend = '2018-05-28T23:10:00'
positions = make_mosaic(tstart, tend, write_output=True)
Optional flags:
write_output = [False] / True
Write the output pointing positions in NuSTAR SOC readable formats in 'outfile' for all of the pointings.
outfile = ['mosaic.txt']
Output | |
# Repository: ActionAnalytics/tfrs
"""
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline
compliance reporting for transportation fuel suppliers in accordance with
the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from django.test import TestCase
from django.test import Client
from django.core.files.uploadedfile import SimpleUploadedFile
import django
from rest_framework import status
from . import fakedata
# Custom API test cases.
# If an API operation does not contains generated code then it is tested in this
# file.
#
class Test_Api_Custom(TestCase):
    """Hand-written tests for API operations with no generated test code.

    The create*/delete* helpers drive fixture objects through the REST API
    itself and return the created id(s) so each test can clean up after
    itself.
    """

    # Django fixtures loaded into the test database before each test run.
    fixtures = ['organization_types.json',
                'organization_government.json',
                'organization_balance_gov.json',
                'credit_trade_statuses.json',
                'credit_trade_statuses_refused.json',
                'organization_actions_types.json',
                'organization_statuses.json',
                'credit_trade_types.json',
                'test_organization_fuel_suppliers.json',
                'test_users.json',
                ]

    def setUp(self):
        # Every test needs a client.  The HTTP_* keyword arguments emulate
        # the SiteMinder authentication headers the API reads.
        self.client = Client(
            HTTP_SMGOV_USERGUID='c9804c52-05f1-4a6a-9d24-332d9d8be2a9',
            HTTP_SMAUTH_USERDISPLAYNAME='<NAME>',
            HTTP_SMGOV_USEREMAIL='<EMAIL>',
            HTTP_SM_UNIVERSALID='BSmith')
        # needed to setup django
        django.setup()

    def createOrganizationStatus(self):
        """POST a new organization status; return the created id."""
        testUrl = "/api/organization_statuses"
        payload = fakedata.OrganizationStatusTestDataCreate()
        payload['effective_date'] = '2017-01-01'
        jsonString = json.dumps(payload)
        response = self.client.post(testUrl, content_type='application/json', data=jsonString)
        # Check that the response is OK.
        assert status.HTTP_201_CREATED == response.status_code
        # parse the response.
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        createdId = data['id']
        return createdId

    def createOrganizationActionType(self):
        """POST a new organization actions type; return the created id."""
        testUrl = "/api/organization_actions_types"
        payload = fakedata.OrganizationActionsTypeTestDataCreate()
        jsonString = json.dumps(payload)
        response = self.client.post(testUrl, content_type='application/json', data=jsonString)
        # Check that the response is OK.
        assert status.HTTP_201_CREATED == response.status_code
        # parse the response.
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        createdId = data['id']
        return createdId

    def createOrganization(self):
        """POST a new organization; return (orgId, statusId, actionsTypeId)."""
        statusId = self.createOrganizationStatus()
        actionsTypeId = self.createOrganizationActionType()
        testUrl = "/api/organizations"
        # Create:
        payload = {
            'name': "Initial",
            'created_date': '2000-01-01',
            # 'primaryContact': contactId ,
            # 'contacts': [contactId],
            'notes': [],
            'attachments': [],
            'history': [],
            'status': statusId,
            'actions_type': actionsTypeId,
        }
        jsonString = json.dumps(payload)
        response = self.client.post(testUrl, content_type='application/json', data=jsonString)
        # Check that the response is OK.
        assert status.HTTP_201_CREATED == response.status_code
        # parse the response.
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        createdId = data['id']
        return createdId, statusId, actionsTypeId

    def createRole(self):
        """POST a new role; return the created id."""
        testUrl = "/api/roles"
        # Create:
        fakeRole = fakedata.RoleTestDataCreate()
        payload = {
            'name': fakeRole['name'],
            'description': fakeRole['description']
        }
        jsonString = json.dumps(payload)
        response = self.client.post(testUrl, content_type='application/json', data=jsonString)
        # Check that the response is OK.
        assert status.HTTP_201_CREATED == response.status_code
        # parse the response.
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        createdId = data['id']
        return createdId

    def createPermission(self):
        """POST a new permission; return the created id."""
        testUrl = "/api/permissions"
        # Create:
        fakePermission = fakedata.PermissionTestDataCreate()
        payload = {
            'code': fakePermission['code'],
            'name': fakePermission['name'],
            'description': fakePermission['description']
        }
        jsonString = json.dumps(payload)
        response = self.client.post(testUrl, content_type='application/json', data=jsonString)
        # Check that the response is OK.
        assert status.HTTP_201_CREATED == response.status_code
        # parse the response.
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        createdId = data['id']
        return createdId

    def createUser(self, organization_id):
        """POST a new user under *organization_id*; return a header dict."""
        testUserUrl = "/api/users"
        # Create:
        fakeUser = fakedata.UserTestDataCreate()
        payload = {
            'firstName': fakeUser['first_name'],
            'lastName':fakeUser['last_name'],
            'email':fakeUser['email'],
            'status':'Active',
            'username': fakeUser['username'],
            'authorizationGuid':fakeUser['authorization_guid'],
            'authorizationDirectory':fakeUser['authorization_directory'],
            'organization': organization_id
        }
        jsonString = json.dumps(payload)
        response = self.client.post(testUserUrl, content_type='application/json', data=jsonString)
        # Check that the response is OK.
        assert status.HTTP_201_CREATED == response.status_code
        # parse the response.
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        # NOTE(review): 'username' is populated from the email field here —
        # presumably intentional for the auth headers; confirm.
        user_headers = {
            'authorizationGuid': data['authorizationGuid'],
            'displayName': data['displayName'],
            'email': data['email'],
            'username': data['email'],
            'id': data['id']
        }
        return user_headers

    def createCreditTradeType(self):
        """POST a new credit trade type; return the created id."""
        testUrl = "/api/credittradetypes"
        payload = fakedata.CreditTradeTypeTestDataCreate()
        payload['expiration_date'] = '2017-01-02'
        payload['effective_date'] = '2017-01-01'
        jsonString = json.dumps(payload)
        response = self.client.post(testUrl, content_type='application/json', data=jsonString)
        # Check that the response is OK.
        assert status.HTTP_201_CREATED == response.status_code
        # parse the response.
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        createdId = data['id']
        return createdId

    def createCreditTradeStatus(self):
        """POST a new credit trade status; return the created id."""
        testUrl = "/api/credittradestatuses"
        payload = fakedata.CreditTradeStatusTestDataCreate()
        payload['effective_date'] = '2017-01-01'
        jsonString = json.dumps(payload)
        response = self.client.post(testUrl, content_type='application/json', data=jsonString)
        # Check that the response is OK.
        assert status.HTTP_201_CREATED == response.status_code
        # parse the response.
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        createdId = data['id']
        return createdId

    def createCreditTrade(self, organization_id, authorization_id):
        """POST a new credit trade; return (tradeId, typeId, statusId)."""
        typeId = self.createCreditTradeType()
        statusId = self.createCreditTradeStatus()
        testUrl = "/api/credittrades"
        # NOTE(review): 'status' and 'respondent' appear twice in this
        # literal; the later entries win, so the payload effectively sends
        # 'status': statusId and 'respondent': organization_id.
        payload = {
            'status':'Active',
            'initiator':organization_id,
            'respondent': organization_id,
            'initiatorLastUpdateBy': authorization_id,
            'respondentLastUpdatedBy': None,
            'reviewedRejectedBy': None,
            'approvedRejectedBy': None,
            'cancelledBy': None,
            'tradeExecutionDate': '2017-01-01',
            # TODO: replace transactionType
            'transactionType':'Type',
            'fairMarketValuePrice': '100.00',
            'notes':[],
            'attachments':[],
            'history':[],
            'type': typeId,
            'status': statusId,
            'respondent': organization_id,
        }
        fakeCreditTrade = fakedata.CreditTradeTestDataCreate()
        payload.update(fakeCreditTrade)
        jsonString = json.dumps(payload)
        response = self.client.post(testUrl, content_type='application/json', data=jsonString)
        # Check that the response is OK.
        assert status.HTTP_201_CREATED == response.status_code
        # parse the response.
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        createdId = data['id']
        return createdId, typeId, statusId

    def deleteRole(self, role_id):
        """Delete the role with the given id via the API."""
        deleteUrl = "/api/roles/" + str(role_id) + "/delete"
        response = self.client.post(deleteUrl)
        # Check that the response is OK.
        assert status.HTTP_204_NO_CONTENT == response.status_code

    def deleteUser(self, authorization_id):
        """Delete the user with the given id via the API."""
        deleteUrl = "/api/users/" + str(authorization_id) + "/delete"
        response = self.client.post(deleteUrl)
        # Check that the response is OK
        assert status.HTTP_204_NO_CONTENT == response.status_code

    def deleteOrganization(self, organization_id):
        """Delete the organization with the given id via the API."""
        deleteUrl = "/api/organizations/" + str(organization_id) + "/delete"
        # NOTE(review): this helper uses PUT while the other delete helpers
        # use POST — presumably matching the route; confirm against the API.
        response = self.client.put(deleteUrl)
        # Check that the response is OK.
        assert status.HTTP_204_NO_CONTENT == response.status_code

    def deleteCreditTrade(self, creditTradeId):
        """Delete the credit trade with the given id via the API."""
        deleteUrl = "/api/credittrades/" + str(creditTradeId) + "/delete"
        response = self.client.post(deleteUrl)
        # Check that the response is OK.
        assert status.HTTP_204_NO_CONTENT == response.status_code

    def deletePermission(self, permission_id):
        """Delete the permission with the given id via the API."""
        deleteUrl = "/api/permissions/" + str(permission_id) + "/delete"
        response = self.client.post(deleteUrl)
        # Check that the response is OK.
        assert status.HTTP_204_NO_CONTENT == response.status_code

    def test_credittradesSearchGet(self):
        """Search endpoint returns the single credit trade just created."""
        fsId, _, _ = self.createOrganization()
        user = self.createUser(fsId)
        credId, credTypeId, _ = self.createCreditTrade(fsId, user.get('id'))
        testUrl = "/api/credittrades/search"
        response = self.client.get(testUrl)
        assert status.HTTP_200_OK == response.status_code
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        assert len(data) == 1
        self.deleteCreditTrade(credId)
        self.deleteUser(user.get('id'))
        self.deleteOrganization(fsId)

    def test_usersCurrentGet(self):
        """GET /api/users/current succeeds for an authenticated user."""
        organization_id, statusId, actionId = self.createOrganization()
        user = self.createUser(organization_id)
        testUrl="/api/users/current"
        # List:
        response = self.client.get(testUrl)
        assert status.HTTP_200_OK == response.status_code
        self.deleteUser (user.get('id'))
        self.deleteOrganization(organization_id)

    def test_rolesIdPermissionsGet(self):
        """Exercise create/get/put on a role's permissions sub-resource."""
        # create a group.
        role_id = self.createRole()
        # create a permission.
        permission_id = self.createPermission()
        rolePermissionUrl = "/api/roles/" + str(role_id) + "/permissions"
        # create a new group membership.
        payload = {'role':role_id, 'permission':permission_id}
        jsonString = json.dumps(payload)
        response = self.client.post(rolePermissionUrl,content_type='application/json', data=jsonString)
        assert status.HTTP_200_OK == response.status_code
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        rolePermissionId = data['id']
        # test the get
        response = self.client.get(rolePermissionUrl)
        assert status.HTTP_200_OK == response.status_code
        # test the put. This will also delete the RolePermission.
        payload = []
        jsonString = json.dumps(payload)
        response = self.client.put(rolePermissionUrl,content_type='application/json', data=jsonString)
        assert status.HTTP_200_OK == response.status_code
        # cleanup
        self.deleteRole(role_id)
        self.deletePermission(permission_id)

    def test_rolesIdUsersGet(self):
        """Exercise create/get/put on a user's roles sub-resource."""
        role_id = self.createRole()
        organization_id, statusId, actionId = self.createOrganization()
        user = self.createUser(organization_id)
        userRoleUrl = "/api/users/" + str(user.get('id')) + "/roles"
        # create a new UserRole.
        payload = {
            'effective_date': '2000-01-01',
            'expiration_date': None,
            'user': user.get('id'),
            'role': role_id
        }
        jsonString = json.dumps(payload)
        response = self.client.post(userRoleUrl,content_type='application/json', data=jsonString)
        assert status.HTTP_200_OK == response.status_code
        # test the get
        response = self.client.get(userRoleUrl)
        assert status.HTTP_200_OK == response.status_code
        testUrl = "/api/roles/" + str(role_id)
        # get the users in the group.
        response = self.client.get(testUrl)
        # Check that the response is OK.
        assert status.HTTP_200_OK == response.status_code
        # parse the response.
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        # test the PUT - this will clear the user role map.
        payload = []
        jsonString = json.dumps(payload)
        response = self.client.put(userRoleUrl,content_type='application/json', data=jsonString)
        assert status.HTTP_200_OK == response.status_code
        # cleanup
        self.deleteRole(role_id)
        self.deleteUser(user.get('id'))
        self.deleteOrganization(organization_id)

    def test_usersIdPermissionsGet(self):
        """GET a user's permissions sub-resource succeeds."""
        # create a user.
        organization_id, statusId, actionId = self.createOrganization()
        user = self.createUser(organization_id)
        # create a credit trade
        # notificationEventId = self.createUser(organization_id)
        # assign permissions to the user.
        #TODO add that.
        userPermissionUrl = "/api/users/" + str(user.get('id')) + "/permissions"
        # test the Get
        response = self.client.get(userPermissionUrl)
        assert status.HTTP_200_OK == response.status_code
        # cleanup
        self.deleteUser(user.get('id'))
        self.deleteOrganization(organization_id)

    def test_usersSearchGet(self):
        """GET /api/users/search succeeds."""
        organization_id, statusId, actionId = self.createOrganization()
        user = self.createUser(organization_id)
        # do a search
        testUrl = "/api/users/search"
        response = self.client.get(testUrl)
        # Check that the response is OK.
        assert status.HTTP_200_OK == response.status_code
        # parse the response.
        jsonString = response.content.decode("utf-8")
        data = json.loads(jsonString)
        # Cleanup
        self.deleteUser(user.get('id'))
        self.deleteOrganization(organization_id)

    def test_createCreditTradeNegativeNumberOfCredits(self):
        """Creating a credit trade with negative credits is rejected (422)."""
        fsId, _, _ = self.createOrganization()
        user = self.createUser(fsId)
        typeId = self.createCreditTradeType()
        statusId = self.createCreditTradeStatus()
        testUrl = "/api/credittrades"
        payload = {
            'status': statusId,
            'type': typeId,
            'fairMarketValuePrice': '100.00',
            'historySet':[],
            'initiator': fsId,
            'respondent': fsId,
            'trade_effective_date': '2017-01-01',
        }
        fakeCreditTrade = fakedata.CreditTradeTestDataCreate()
        payload.update(fakeCreditTrade)
        # Force the invalid value under test.
        payload['number_of_credits'] = -1
        jsonString = json.dumps(payload)
        response = self.client.post(testUrl, content_type='application/json', data=jsonString)
        # Check that the response is OK.
        assert status.HTTP_422_UNPROCESSABLE_ENTITY == response.status_code
        self.deleteUser(user.get('id'))
        self.deleteOrganization(fsId)
if __name__ == '__main__':
| |
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2019, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import random
import tempfile
import os
from datetime import datetime, timedelta
import gevent
import pytest
from pytest import approx
from volttron.platform import get_services_core
from volttron.platform.agent import utils
from volttron.platform.messaging import headers as headers_mod
from volttron.platform.vip.agent import Agent
from volttron.platform.keystore import KnownHostsStore
# import types
# Handle for the installed ForwardHistorian agent; populated by the
# `forwarder` fixture below.
forwarder_uuid = None
# ForwardHistorian configuration.  "destination-vip" (and serverkey) are
# filled in at fixture time; topic_replace_list anonymises the building
# name before records are forwarded.
forwarder_config = {
    "destination-vip": "",
    "custom_topic_list": [],
    "topic_replace_list": [
        {"from": "PNNL/BUILDING_1", "to": "PNNL/BUILDING1_ANON"}
    ]
}
# SQLHistorian configuration: persist records to a local SQLite file.
sqlite_config = {
    "connection": {
        "type": "sqlite",
        "params": {
            "database": 'test.sqlite'
        }
    }
}
# The two platform instances shared by this module: instance1 publishes
# and forwards, instance2 runs the historian.  Populated by the
# `volttron_instances` fixture.
volttron_instance1 = None
volttron_instance2 = None
@pytest.fixture(scope="module")
def volttron_instances(request, get_volttron_instances):
    """Create the two VOLTTRON platform instances shared by this module."""
    global volttron_instance1, volttron_instance2
    # if volttron_instance1 is None:
    volttron_instance1, volttron_instance2 = get_volttron_instances(2)
# Fixture for setup and teardown of publish agent
@pytest.fixture(scope="module")
def publish_agent(request, volttron_instances, forwarder):
    """Build a fake agent on instance1 used to publish to the message bus."""
    global volttron_instance1, volttron_instance2
    # 1: Start a fake agent to publish to message bus
    agent = volttron_instance1.build_agent(identity='test-agent')

    # 2: add a tear down method to stop sqlhistorian agent and the fake
    # agent that published to message bus
    def stop_agent():
        print("In teardown method of publish_agent")
        if isinstance(agent, Agent):
            agent.core.stop()
    request.addfinalizer(stop_agent)
    return agent
@pytest.fixture(scope="module")
def query_agent(request, volttron_instances, sqlhistorian):
    """Build a fake agent on instance2 used to query platform.historian."""
    # 1: Start a fake agent to query the sqlhistorian in volttron_instance2
    agent = volttron_instance2.build_agent()

    # 2: add a tear down method to stop sqlhistorian agent and the fake
    # agent that published to message bus
    def stop_agent():
        print("In teardown method of module")
        agent.core.stop()
    request.addfinalizer(stop_agent)
    return agent
@pytest.fixture(scope="module")
def sqlhistorian(request, volttron_instances):
    """Install and start a SQLHistorian agent on instance2."""
    global volttron_instance1, volttron_instance2
    global sqlite_config
    # 1: Install historian agent
    # Install and start sqlhistorian agent in instance2
    agent_uuid = volttron_instance2.install_agent(
        agent_dir=get_services_core("SQLHistorian"),
        config_file=sqlite_config,
        start=True,
        vip_identity='platform.historian')
    print("sqlite historian agent id: ", agent_uuid)
@pytest.fixture(scope="module")
def forwarder(request, volttron_instances):
    """Install a ForwardHistorian on instance1 that targets instance2."""
    #print "Fixture forwarder"
    global volttron_instance1, volttron_instance2
    global forwarder_uuid, forwarder_config
    # 1. Update destination address in forwarder configuration
    volttron_instance1.allow_all_connections()
    volttron_instance2.allow_all_connections()
    # setup destination address to include keys: register instance2's
    # serverkey in instance1's known-hosts store so the forwarder can
    # authenticate the destination.
    known_hosts_file = os.path.join(volttron_instance1.volttron_home, 'known_hosts')
    known_hosts = KnownHostsStore(known_hosts_file)
    known_hosts.add(volttron_instance2.vip_address, volttron_instance2.serverkey)
    forwarder_config["destination-vip"] = volttron_instance2.vip_address
    forwarder_config["destination-serverkey"] = volttron_instance2.serverkey
    # 1: Install historian agent
    # Install and start the forwarder agent in instance1
    forwarder_uuid = volttron_instance1.install_agent(
        agent_dir=get_services_core("ForwardHistorian"),
        config_file=forwarder_config,
        start=True)
    print("forwarder agent id: ", forwarder_uuid)
def publish(publish_agent, topic, header, message):
    """Publish *message* on *topic*, supporting both publisher flavours.

    A VIP :class:`Agent` publishes through the pubsub subsystem (blocking
    up to 10 s for delivery); anything else is assumed to expose the
    legacy ``publish_json`` interface.
    """
    if not isinstance(publish_agent, Agent):
        # Legacy publisher interface.
        publish_agent.publish_json(topic, header, message)
        return
    publish_agent.vip.pubsub.publish(
        'pubsub', topic, headers=header, message=message
    ).get(timeout=10)
@pytest.mark.historian
@pytest.mark.forwarder
def test_devices_topic(publish_agent, query_agent):
    """
    Test if devices topic message is getting forwarded to historian running on
    another instance. Test if topic name substitutions happened.
    Publish to 'devices/PNNL/BUILDING_1/Device/all' in volttron_instance1 and query
    for topic 'devices/PNNL/BUILDING1_ANON/Device/all' in volttron_instance

    @param publish_agent: Fake agent used to publish messages to bus in
    volttron_instance1. Calling this fixture makes sure all the dependant
    fixtures are called to setup and start volttron_instance1 and forwareder
    agent and returns the instance of fake agent to publish
    @param query_agent: Fake agent used to query sqlhistorian in
    volttron_instance. Calling this fixture makes sure all the dependant
    fixtures are called to setup and start volttron_instance and sqlhistorian
    agent and returns the instance of a fake agent to query the historian
    """
    print("\n** test_devices_topic **")
    oat_reading = random.uniform(30, 100)
    float_meta = {'units': 'F', 'tz': 'UTC', 'type': 'float'}
    # Create a message for all points.
    all_message = [{'OutsideAirTemperature': oat_reading},
                   {'OutsideAirTemperature': float_meta}]
    # Publish messages twice
    time1 = utils.format_timestamp(datetime.utcnow())
    headers = {
        headers_mod.DATE: time1
    }
    publish(publish_agent, 'devices/PNNL/BUILDING_1/Device/all', headers, all_message)
    # Give the forwarder/historian a moment to persist the record.
    gevent.sleep(1)
    # Verify topic name replacement by querying the replaced topic name
    # PNNL/BUILDING_1 should be replaced with PNNL/BUILDING1_ANON
    result = query_agent.vip.rpc.call(
        'platform.historian',
        'query',
        topic='PNNL/BUILDING1_ANON/Device/OutsideAirTemperature',
        start=time1,
        count=20,
        order="LAST_TO_FIRST").get(timeout=10)
    assert (len(result['values']) == 1)
    # Historian normalises timestamps to an explicit +00:00 offset.
    (time1_date, time1_time) = time1.split("T")
    assert (result['values'][0][0] == time1_date + 'T' + time1_time + '+00:00')
    assert (result['values'][0][1] == approx(oat_reading))
    assert set(result['metadata'].items()) == set(float_meta.items())
@pytest.mark.historian
@pytest.mark.forwarder
def test_analysis_topic(publish_agent, query_agent):
    """
    Test if devices topic message is getting forwarded to historian running on
    another instance. Test if topic name substitutions happened.
    Publish to topic
    'analysis/PNNL/BUILDING_1/Device/MixedAirTemperature' in volttron_instance1 and
    query for topic
    'PNNL/BUILDING1_ANON/Device/MixedAirTemperature' in volttron_instance

    @param publish_agent: Fake agent used to publish messages to bus in
    volttron_instance1. Calling this fixture makes sure all the dependant
    fixtures are called to setup and start volttron_instance1 and forwareder
    agent and returns the instance of fake agent to publish
    @param query_agent: Fake agent used to query sqlhistorian in
    volttron_instance. Calling this fixture makes sure all the dependant
    fixtures are called to setup and start volttron_instance and sqlhistorian
    agent and returns the instance of a fake agent to query the historian
    """
    print("\n** test_analysis_topic **")
    # Publish fake data. The format mimics the format used by VOLTTRON drivers.
    # Make some random readings
    oat_reading = random.uniform(30, 100)
    mixed_reading = oat_reading + random.uniform(-5, 5)
    damper_reading = random.uniform(0, 100)
    # Create a message for all points.
    all_message = [{'OutsideAirTemperature': oat_reading,
                    'MixedAirTemperature': mixed_reading,
                    'DamperSignal': damper_reading},
                   {'OutsideAirTemperature': {'units': 'F', 'tz': 'UTC',
                                              'type': 'float'},
                    'MixedAirTemperature': {'units': 'F', 'tz': 'UTC',
                                            'type': 'float'},
                    'DamperSignal': {'units': '%', 'tz': 'UTC',
                                     'type': 'float'}
                    }]
    # Create timestamp
    now = utils.format_timestamp(datetime.utcnow())
    print("now is ", now)
    headers = {
        headers_mod.DATE: now,
        headers_mod.TIMESTAMP: now
    }
    # Publish messages
    publish(publish_agent, 'analysis/PNNL/BUILDING_1/Device',
            headers, all_message)
    # Give the forwarder/historian a moment to persist the record.
    gevent.sleep(0.5)
    # pytest.set_trace()
    # Query the historian
    result = query_agent.vip.rpc.call(
        'platform.historian',
        'query',
        topic='PNNL/BUILDING1_ANON/Device/MixedAirTemperature',
        start=now,
        order="LAST_TO_FIRST").get(timeout=10)
    print('Query Result', result)
    assert (len(result['values']) == 1)
    # Historian normalises timestamps: strip a trailing 'Z' and expect an
    # explicit +00:00 offset.
    (now_date, now_time) = now.split("T")
    if now_time[-1:] == 'Z':
        now_time = now_time[:-1]
    assert (result['values'][0][0] == now_date + 'T' + now_time + '+00:00')
    assert (result['values'][0][1] == approx(mixed_reading))
@pytest.mark.historian
@pytest.mark.forwarder
def test_analysis_topic_no_header(publish_agent, query_agent):
"""
Test if devices topic message is getting forwarded to historian running on
another instance. Test if topic name substitutions happened.
Publish to topic
'analysis/PNNL/BUILDING_1/Device/MixedAirTemperature' in volttron_instance1 and
query for topic
'PNNL/BUILDING1_ANON/Device/MixedAirTemperature' in volttron_instance
@param publish_agent: Fake agent used to publish messages to bus in
volttron_instance1. Calling this fixture makes sure all the dependent
fixtures are called to set up and start volttron_instance1 and forwarder
agent and returns the instance of fake agent to publish
@param query_agent: Fake agent used to query sqlhistorian in
volttron_instance. Calling this fixture makes sure all the dependent
fixtures are called to set up and start volttron_instance and sqlhistorian
agent and returns the instance of a fake agent to query the historian
"""
print("\n** test_analysis_topic **")
# Publish fake data. The format mimics the format used by VOLTTRON drivers.
# Make some random readings
oat_reading = random.uniform(30, 100)
mixed_reading = oat_reading + random.uniform(-5, 5)
damper_reading = random.uniform(0, 100)
# Create a message for all points.
all_message = [{'OutsideAirTemperature': oat_reading,
'MixedAirTemperature': mixed_reading,
'DamperSignal': damper_reading},
{'OutsideAirTemperature': {'units': 'F', 'tz': 'UTC',
'type': 'float'},
'MixedAirTemperature': {'units': 'F', 'tz': 'UTC',
'type': 'float'},
'DamperSignal': {'units': '%', 'tz': 'UTC',
'type': 'float'}
}]
# Create timestamp
now = datetime.utcnow().isoformat() + 'Z'
print("now is ", now)
# Publish messages
publish(publish_agent, 'analysis/PNNL/BUILDING_1/Device',
None, all_message)
gevent.sleep(0.5)
# pytest.set_trace()
# Query the historian
result = query_agent.vip.rpc.call(
'platform.historian',
'query',
topic='PNNL/BUILDING1_ANON/Device/MixedAirTemperature',
start=now,
order="LAST_TO_FIRST").get(timeout=10)
print('Query Result', result)
assert | |
'C', 10,'AN'],
['SRT07', 'C', 10,'AN'],
['SRT08', 'C', 2,'AN'],
['SRT09', 'C', 7,'N1'],
['SRT10', 'C', 7,'N1'],
['SRT11', 'C', 3,'R'],
['SRT12', 'C', (3,3),'AN'],
['SRT13', 'C', (2,25),'AN'],
],
'SS': [
['BOTSID', 'M', 3,'AN'],
['SS01', 'M', (6,6),'DT'],
['SS02', 'M', 5,'AN'],
['SS03', 'M', 1,'AN'],
['SS04', 'M', 1,'AN'],
['SS05', 'C', (6,6),'DT'],
['SS06', 'C', (6,6),'DT'],
['SS07', 'C', 3,'R'],
['SS08', 'C', (6,6),'DT'],
['SS09', 'C', 1,'AN'],
],
'SSE': [
['BOTSID', 'M', 3,'AN'],
['SSE01', 'C', (6,6),'DT'],
['SSE02', 'C', (6,6),'DT'],
['SSE03', 'C', (3,3),'AN'],
],
'SSS': [
['BOTSID', 'M', 3,'AN'],
['SSS01', 'M', 1,'AN'],
['SSS02', 'M', (2,2),'AN'],
['SSS03', 'M', (2,10),'AN'],
['SSS04', 'C', 45,'AN'],
['SSS05', 'C', 9,'R'],
['SSS06', 'C', 9,'N2'],
['SSS07', 'C', 80,'AN'],
['SSS08', 'C', 15,'R'],
['SSS09', 'C', 15,'AN'],
],
'SST': [
['BOTSID', 'M', 3,'AN'],
['SST01', 'C', (3,3),'AN'],
['SST02', 'C', (2,3),'AN'],
['SST03', 'C', 35,'AN'],
['SST04', 'C', (3,3),'AN'],
['SST05', 'C', (2,3),'AN'],
['SST06', 'C', 35,'AN'],
['SST07', 'C', (3,3),'AN'],
['SST08', 'C', (2,2),'AN'],
['SST09', 'C', 1,'AN'],
],
'ST': [
['BOTSID', 'M', 3,'AN'],
['ST01', 'M', (3,3),'AN'],
['ST02', 'M', (4,9),'AN'],
],
'STA': [
['BOTSID', 'M', 3,'AN'],
['STA01', 'M', (2,2),'AN'],
['STA02', 'M', 10,'R'],
['STA03', 'C', (2,2),'AN'],
['STA04', 'C', 3,'AN'],
['STA05', 'C', (2,2),'AN'],
['STA06', 'C', 10,'R'],
['STA07', 'C', 10,'R'],
],
'STC': [
['BOTSID', 'M', 3,'AN'],
['STC01', 'M', 20,'AN'],
['STC02', 'C', (6,6),'DT'],
['STC03', 'C', 2,'AN'],
['STC04', 'C', 15,'R'],
['STC05', 'C', 15,'R'],
['STC06', 'C', (6,6),'DT'],
['STC07', 'C', (3,3),'AN'],
['STC08', 'C', (6,6),'DT'],
['STC09', 'C', 16,'AN'],
['STC10', 'C', 20,'AN'],
['STC11', 'C', 20,'AN'],
['STC12', 'C', 264,'AN'],
],
'SUM': [
['BOTSID', 'M', 3,'AN'],
['SUM01', 'C', 1,'AN'],
['SUM02', 'C', 2,'AN'],
['SUM03', 'C', 1,'AN'],
['SUM04', 'C', 15,'R'],
['SUM05', 'C', 15,'R'],
['SUM06', 'C', 15,'R'],
['SUM07', 'C', 10,'R'],
['SUM08', 'C', 10,'R'],
['SUM09', 'C', 6,'R'],
['SUM10', 'C', 1,'AN'],
['SUM11', 'C', 4,'R'],
['SUM12', 'C', 15,'R'],
['SUM13', 'C', (2,3),'AN'],
['SUM14', 'C', 35,'AN'],
['SUM15', 'C', 15,'R'],
['SUM16', 'C', 15,'R'],
['SUM17', 'C', 15,'R'],
],
'SUP': [
['BOTSID', 'M', 3,'AN'],
['SUP01', 'M', (3,3),'AN'],
['SUP02', 'C', (2,4),'AN'],
['SUP03', 'C', 60,'AN'],
['SUP04', 'C', (2,2),'AN'],
],
'SV': [
['BOTSID', 'M', 3,'AN'],
['SV01', 'C', (2,2),'AN'],
['SV02', 'C', 4,'N1'],
['SV03', 'C', 4,'N1'],
['SV04', 'C', 1,'AN'],
],
'SV1': [
['BOTSID', 'M', 3,'AN'],
['SV101', 'M', [
['SV101.01', 'M', (2,2),'AN'],
['SV101.02', 'M', 30,'AN'],
['SV101.03', 'C', (2,2),'AN'],
['SV101.04', 'C', (2,2),'AN'],
['SV101.05', 'C', (2,2),'AN'],
['SV101.06', 'C', (2,2),'AN'],
['SV101.07', 'C', 80,'AN'],
]],
['SV102', 'C', 15,'R'],
['SV103', 'C', (2,2),'AN'],
['SV104', 'C', 15,'R'],
['SV105', 'C', 2,'AN'],
['SV106', 'C', 2,'AN'],
['SV107', 'C', [
['SV107.01', 'M', 2,'R'],
['SV107.02', 'C', 2,'R'],
['SV107.03', 'C', 2,'R'],
['SV107.04', 'C', 2,'R'],
]],
['SV108', 'C', 15,'R'],
['SV109', 'C', 1,'AN'],
['SV110', 'C', 2,'AN'],
['SV111', 'C', 1,'AN'],
['SV112', 'C', 1,'AN'],
['SV113', 'C', 2,'AN'],
['SV114', 'C', 2,'AN'],
['SV115', 'C', 1,'AN'],
['SV116', 'C', 1,'AN'],
['SV117', 'C', 30,'AN'],
['SV118', 'C', (3,9),'AN'],
['SV119', 'C', 15,'R'],
['SV120', 'C', 1,'AN'],
['SV121', 'C', 1,'AN'],
],
'SV2': [
['BOTSID', 'M', 3,'AN'],
['SV201', 'C', 30,'AN'],
['SV202', 'C', [
['SV202.01', 'M', (2,2),'AN'],
['SV202.02', 'M', 30,'AN'],
['SV202.03', 'C', (2,2),'AN'],
['SV202.04', 'C', (2,2),'AN'],
['SV202.05', 'C', (2,2),'AN'],
['SV202.06', 'C', (2,2),'AN'],
['SV202.07', 'C', 80,'AN'],
]],
['SV203', 'C', 15,'R'],
['SV204', 'C', (2,2),'AN'],
['SV205', 'C', 15,'R'],
['SV206', 'C', 10,'R'],
['SV207', 'C', 15,'R'],
['SV208', 'C', 1,'AN'],
['SV209', 'C', 1,'AN'],
['SV210', 'C', 1,'AN'],
],
'SV3': [
['BOTSID', 'M', 3,'AN'],
['SV301', 'M', [
['SV301.01', 'M', (2,2),'AN'],
['SV301.02', 'M', 30,'AN'],
['SV301.03', 'C', (2,2),'AN'],
['SV301.04', 'C', (2,2),'AN'],
['SV301.05', 'C', (2,2),'AN'],
['SV301.06', 'C', (2,2),'AN'],
['SV301.07', 'C', 80,'AN'],
]],
['SV302', 'M', 15,'R'],
['SV303', 'C', 2,'AN'],
['SV304', 'C', 30,'AN'],
['SV305', 'C', 2,'AN'],
['SV306', 'C', 3,'AN'],
['SV307', 'C', 1,'AN'],
['SV308', 'C', 15,'R'],
['SV309', 'C', 80,'AN'],
['SV310', 'C', 1,'AN'],
['SV311', 'C', 1,'AN'],
],
'SV4': [
['BOTSID', 'M', 3,'AN'],
['SV401', 'M', 30,'AN'],
['SV402', 'C', [
['SV402.01', 'M', (2,2),'AN'],
['SV402.02', 'M', 30,'AN'],
['SV402.03', 'C', (2,2),'AN'],
['SV402.04', 'C', (2,2),'AN'],
['SV402.05', 'C', (2,2),'AN'],
['SV402.06', 'C', (2,2),'AN'],
['SV402.07', 'C', 80,'AN'],
]],
['SV403', 'C', 30,'AN'],
['SV404', 'C', 1,'AN'],
['SV405', 'C', 1,'AN'],
['SV406', 'C', 3,'AN'],
['SV407', 'C', 1,'AN'],
['SV408', 'C', 80,'AN'],
['SV409', 'C', 1,'AN'],
['SV410', 'C', 1,'AN'],
['SV411', 'C', 1,'AN'],
['SV412', 'C', 2,'AN'],
['SV413', 'C', 1,'AN'],
['SV414', 'C', (2,2),'AN'],
['SV415', 'C', 1,'AN'],
['SV416', 'C', 1,'AN'],
['SV417', 'C', 1,'AN'],
['SV418', 'C', 1,'AN'],
],
'SV5': [
['BOTSID', 'M', 3,'AN'],
['SV501', 'M', [
['SV501.01', 'M', (2,2),'AN'],
['SV501.02', 'M', 30,'AN'],
['SV501.03', 'C', (2,2),'AN'],
['SV501.04', 'C', (2,2),'AN'],
['SV501.05', 'C', (2,2),'AN'],
['SV501.06', 'C', (2,2),'AN'],
['SV501.07', 'C', 80,'AN'],
]],
['SV502', 'M', (2,2),'AN'],
['SV503', 'M', 15,'R'],
['SV504', 'C', 15,'R'],
['SV505', 'C', 15,'R'],
['SV506', 'C', 1,'AN'],
],
'SV6': [
['BOTSID', 'M', 3,'AN'],
['SV601', 'M', [
['SV601.01', 'M', (2,2),'AN'],
['SV601.02', 'M', 30,'AN'],
['SV601.03', 'C', (2,2),'AN'],
['SV601.04', 'C', (2,2),'AN'],
['SV601.05', 'C', (2,2),'AN'],
['SV601.06', 'C', (2,2),'AN'],
['SV601.07', 'C', 80,'AN'],
]],
['SV602', 'C', 2,'AN'],
['SV603', 'C', 2,'AN'],
['SV604', 'C', 15,'R'],
['SV605', 'C', [
['SV605.01', 'M', 2,'R'],
['SV605.02', 'C', 2,'R'],
['SV605.03', 'C', 2,'R'],
['SV605.04', 'C', 2,'R'],
]],
['SV606', 'C', 15,'R'],
['SV607', 'C', 1,'AN'],
],
'SV7': [
['BOTSID', 'M', 3,'AN'],
['SV701', 'C', 30,'AN'],
['SV702', 'C', 30,'AN'],
['SV703', 'C', (2,2),'AN'],
],
'SVC': [
['BOTSID', 'M', 3,'AN'],
['SVC01', 'M', [
['SVC01.01', 'M', (2,2),'AN'],
['SVC01.02', 'M', 30,'AN'],
['SVC01.03', 'C', (2,2),'AN'],
['SVC01.04', 'C', (2,2),'AN'],
['SVC01.05', 'C', (2,2),'AN'],
['SVC01.06', 'C', (2,2),'AN'],
['SVC01.07', 'C', 80,'AN'],
]],
['SVC02', 'M', 15,'R'],
['SVC03', 'M', 15,'R'],
['SVC04', 'C', 30,'AN'],
['SVC05', 'C', 15,'R'],
['SVC06', 'C', [
['SVC06.01', 'M', (2,2),'AN'],
['SVC06.02', 'M', 30,'AN'],
['SVC06.03', 'C', (2,2),'AN'],
['SVC06.04', 'C', (2,2),'AN'],
['SVC06.05', 'C', (2,2),'AN'],
['SVC06.06', 'C', (2,2),'AN'],
['SVC06.07', 'C', 80,'AN'],
]],
['SVC07', 'C', 15,'R'],
],
'SW': [
['BOTSID', 'M', 3,'AN'],
['SW01', 'M', 1,'AN'],
['SW02', 'M', (2,3),'AN'],
['SW03', 'M', (4,4),'AN'],
['SW04', 'M', 10,'AN'],
['SW05', 'C', (2,4),'AN'],
['SW06', 'C', (2,2),'AN'],
['SW07', 'C', 7,'R'],
['SW08', 'C', 5,'AN'],
['SW09', 'C', 6,'R'],
],
'T1': [
['BOTSID', 'M', 3,'AN'],
['T101', 'M', 6,'R'],
['T102', 'C', 6,'R'],
['T103', 'C', (6,6),'DT'],
['T104', 'C', (2,4),'AN'],
['T105', 'C', (2,30),'AN'],
['T106', 'C', (2,2),'AN'],
['T107', 'C', (6,9),'AN'],
['T108', 'C', 6,'AN'],
['T109', 'C', 3,'AN'],
],
'T2': [
['BOTSID', 'M', 3,'AN'],
['T201', 'M', 6,'R'],
['T202', 'C', 50,'AN'],
['T203', 'C', 10,'R'],
['T204', 'C', 2,'AN'],
['T205', 'C', 9,'R'],
['T206', 'C', (2,2),'AN'],
['T207', 'C', 9,'R'],
['T208', 'C', (2,2),'AN'],
['T209', 'C', (2,30),'AN'],
['T210', 'C', (2,30),'AN'],
['T211', 'C', (2,4),'N2'],
['T212', 'C', (2,4),'N2'],
],
'T3': [
['BOTSID', 'M', 3,'AN'],
['T301', 'M', 6,'R'],
['T302', 'M', (2,4),'AN'],
['T303', 'C', 2,'AN'],
['T304', 'C', (2,30),'AN'],
['T305', 'C', (6,9),'AN'],
['T306', 'C', 4,'AN'],
['T307', 'C', 10,'AN'],
],
'T6': [
['BOTSID', 'M', 3,'AN'],
['T601', 'M', 6,'R'],
['T602', 'C', 9,'R'],
['T603', 'C', (2,2),'AN'],
['T604', 'C', (2,30),'AN'],
['T605', 'C', 9,'R'],
['T606', 'C', (2,2),'AN'],
['T607', 'C', (2,30),'AN'],
],
'T8': [
['BOTSID', 'M', 3,'AN'],
['T801', 'M', 6,'R'],
['T802', 'M', 80,'AN'],
],
'TA1': [
['BOTSID', 'M', 3,'AN'],
['TA101', 'M', (9,9),'R'],
['TA102', 'M', (6,6),'DT'],
['TA103', 'M', (4,4),'TM'],
['TA104', 'M', 1,'AN'],
['TA105', 'M', (3,3),'AN'],
],
'TAX': [
['BOTSID', 'M', 3,'AN'],
['TAX01', 'C', 20,'AN'],
['TAX02', 'C', 2,'AN'],
['TAX03', 'C', 30,'AN'],
['TAX04', 'C', 2,'AN'],
['TAX05', 'C', 30,'AN'],
['TAX06', 'C', 2,'AN'],
['TAX07', 'C', 30,'AN'],
['TAX08', 'C', 2,'AN'],
['TAX09', 'C', 30,'AN'],
['TAX10', 'C', 2,'AN'],
['TAX11', 'C', 30,'AN'],
['TAX12', 'C', 1,'AN'],
],
'TBA': [
['BOTSID', 'M', 3,'AN'],
['TBA01', 'C', (2,2),'AN'],
['TBA02', 'C', 15,'R'],
['TBA03', 'C', 10,'R'],
],
'TC1': [
['BOTSID', 'M', 3,'AN'],
['TC101', 'M', 16,'AN'],
['TC102', 'M', 3,'R'],
['TC103', 'M', 45,'AN'],
['TC104', 'M', (3,3),'AN'],
['TC105', 'M', 3,'R'],
['TC106', 'C', 16,'AN'],
],
'TC2': [
['BOTSID', 'M', 3,'AN'],
['TC201', 'M', 1,'AN'],
['TC202', 'M', 16,'AN'],
],
'TCD': [
['BOTSID', 'M', 3,'AN'],
['TCD01', 'C', 11,'AN'],
['TCD02', 'C', (6,6),'DT'],
['TCD03', 'C', (4,8),'TM'],
['TCD04', 'C', 2,'AN'],
['TCD05', 'C', 30,'AN'],
['TCD06', 'C', (2,2),'AN'],
['TCD07', 'C', 2,'AN'],
['TCD08', 'C', 30,'AN'],
['TCD09', 'C', (2,2),'AN'],
['TCD10', 'C', 10,'R'],
['TCD11', 'C', 10,'R'],
['TCD12', 'C', 15,'R'],
['TCD13', 'C', 15,'R'],
['TCD14', 'C', 15,'R'],
['TCD15', 'C', 15,'R'],
['TCD16', 'C', 1,'AN'],
],
'TD1': [
['BOTSID', 'M', 3,'AN'],
['TD101', 'C', (5,5),'AN'],
['TD102', 'C', 7,'R'],
['TD103', 'C', 1,'AN'],
['TD104', 'C', 16,'AN'],
['TD105', 'C', 50,'AN'],
['TD106', 'C', 2,'AN'],
['TD107', 'C', 10,'R'],
['TD108', 'C', (2,2),'AN'],
],
'TD3': [
['BOTSID', 'M', 3,'AN'],
['TD301', 'M', (2,2),'AN'],
['TD302', 'C', 4,'AN'],
['TD303', 'C', 10,'AN'],
['TD304', 'C', 2,'AN'],
['TD305', 'C', 10,'R'],
['TD306', 'C', (2,2),'AN'],
['TD307', 'C', 1,'AN'],
['TD308', 'C', (2,2),'AN'],
['TD309', 'C', (2,15),'AN'],
],
'TD4': [
['BOTSID', 'M', 3,'AN'],
['TD401', 'C', (2,3),'AN'],
['TD402', 'C', 1,'AN'],
['TD403', 'C', (2,4),'AN'],
['TD404', 'C', 80,'AN'],
],
'TD5': [
['BOTSID', 'M', 3,'AN'],
['TD501', 'C', 2,'AN'],
['TD502', 'C', 2,'AN'],
['TD503', 'C', (2,17),'AN'],
['TD504', 'C', 2,'AN'],
['TD505', 'C', 35,'AN'],
['TD506', 'C', (2,2),'AN'],
['TD507', 'C', 2,'AN'],
['TD508', 'C', 30,'AN'],
['TD509', 'C', (2,2),'AN'],
['TD510', 'C', (2,2),'AN'],
['TD511', 'C', 4,'R'],
['TD512', 'C', (2,2),'AN'],
],
'TDS': [
['BOTSID', 'M', 3,'AN'],
['TDS01', 'M', 10,'N2'],
['TDS02', 'C', 10,'N2'],
['TDS03', 'C', 10,'N2'],
['TDS04', 'C', 10,'N2'],
],
'TED': [
['BOTSID', 'M', 3,'AN'],
['TED01', 'M', 3,'AN'],
['TED02', 'C', 60,'AN'],
['TED03', 'C', (2,3),'AN'],
['TED04', 'C', 6,'R'],
['TED05', 'C', 2,'R'],
['TED06', 'C', 4,'R'],
['TED07', 'C', 99,'AN'],
['TED08', 'C', 99,'AN'],
],
'TF': [
['BOTSID', 'M', 3,'AN'],
['TF01', 'M', 4,'AN'],
['TF02', 'M', 7,'AN'],
['TF03', 'C', 2,'AN'],
['TF04', 'C', 4,'AN'],
],
'TFA': [
['BOTSID', 'M', 3,'AN'],
['TFA01', 'M', (2,2),'AN'],
['TFA02', 'C', 9,'R'],
['TFA03', 'C', 9,'R'],
| |
code-block:: python
'''
with fluid.layers.Switch() as switch:
with switch.case(cond1):
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=1)
with switch.case(cond2):
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=2)
with switch.default():
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
'''
Args:
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
lr = fluid.layers.create_global_var(
shape=[1],
value=0.0,
dtype='float32',
persistable=True,
name="learning_rate")
zero_var = fluid.layers.fill_constant(
shape=[1], dtype='float32', value=0.0)
one_var = fluid.layers.fill_constant(
shape=[1], dtype='float32', value=1.0)
two_var = fluid.layers.fill_constant(
shape=[1], dtype='float32', value=2.0)
global_step = fluid.layers.autoincreased_step_counter(counter_name='@LR_DECAY_COUNTER@', begin=0, step=1)
with fluid.layers.control_flow.Switch() as switch:
with switch.case(global_step == zero_var):
fluid.layers.assign(input=one_var, output=lr)
with switch.default():
fluid.layers.assign(input=two_var, output=lr)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
res = exe.run(fluid.default_main_program(), feed={}, fetch_list=[lr])
print(res) # [array([1.], dtype=float32)]
"""
def __init__(self, name=None):
    """Create a Switch helper.

    Args:
        name (str, optional): name prefix for the ops created by this switch.
    """
    self.helper = LayerHelper('switch', name=name)
    # True only while inside the ``with Switch()`` scope (set by __enter__).
    self.inside_scope = False
    # Running conjunction of the negations of all previously seen case
    # conditions; element i means "none of cases 0..i matched".
    self.pre_not_conditions = []
def case(self, condition):
    """Add a new case branch guarded by *condition*.

    The branch is entered only when *condition* holds AND no earlier case
    matched; this is encoded by AND-ing with the accumulated negations in
    ``self.pre_not_conditions``.

    Args:
        condition (Variable): scalar bool condition for this branch.

    Returns:
        ConditionalBlockGuard: context manager for the branch's block.

    Raises:
        ValueError: if called outside the ``with Switch()`` scope.
    """
    if not self.inside_scope:
        raise ValueError("case should be called inside with")
    check_variable_and_dtype(
        condition, 'condition', ['bool'],
        'the member function case of fluid.layers.Switch')
    if len(self.pre_not_conditions) == 0:
        # First case: guarded by the condition alone.
        cond_block = ConditionalBlock([condition], is_scalar_condition=True)
        not_cond = logical_not(x=condition)
        self.pre_not_conditions.append(not_cond)
    else:
        # Later cases: guard = (none of the previous cases) AND condition.
        pre_cond_num = len(self.pre_not_conditions)
        pre_not_cond = self.pre_not_conditions[pre_cond_num - 1]
        new_not_cond = logical_and(
            x=pre_not_cond, y=logical_not(x=condition))
        self.pre_not_conditions.append(new_not_cond)
        cond_block = ConditionalBlock(
            [logical_and(
                x=pre_not_cond, y=condition)],
            is_scalar_condition=True)
    return ConditionalBlockGuard(cond_block)
def default(self):
    """Return a guard for the default branch, taken when no case matched.

    Raises:
        ValueError: if no ``case`` has been registered yet.
    """
    if not self.pre_not_conditions:
        raise ValueError("there should be at least one condition")
    # The most recent accumulated negation means "none of the cases held".
    none_matched = self.pre_not_conditions[-1]
    guard_block = ConditionalBlock([none_matched], is_scalar_condition=True)
    return ConditionalBlockGuard(guard_block)
def __enter__(self):
    """Enter the switch scope, enabling calls to ``case``/``default``."""
    self.inside_scope = True
    return self
def __exit__(self, exc_type, exc_val, exc_tb):
    """Leave the switch scope; let any in-flight exception propagate."""
    self.inside_scope = False
    # Returning False from __exit__ re-raises the active exception;
    # returning True (no exception) is a harmless no-op.
    return exc_type is None
class IfElseBlockGuard(object):
    """Context manager that enters the true/false ConditionalBlock of an IfElse."""

    def __init__(self, is_true, ifelse):
        if not isinstance(ifelse, IfElse):
            raise TypeError("ifelse must be an instance of IfElse class")
        if ifelse.status != IfElse.OUT_IF_ELSE_BLOCKS:
            raise ValueError("You cannot invoke IfElse.block() inside a block")
        self.is_true = is_true
        self.ie = ifelse
        # Pick the conditional block matching the requested branch.
        branch = (ifelse.conditional_true_block
                  if is_true else ifelse.conditional_false_block)
        if not isinstance(branch, ConditionalBlock):
            raise TypeError("Unexpected situation")
        self.cond_block = branch.block()

    def __enter__(self):
        if self.is_true:
            self.ie.status = IfElse.IN_IF_ELSE_TRUE_BLOCKS
        else:
            self.ie.status = IfElse.IN_IF_ELSE_FALSE_BLOCKS
        self.cond_block.__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.cond_block.__exit__(exc_type, exc_val, exc_tb):
            # re-raise inside exception
            return False
        # Each branch must have produced at least one output via ie.output().
        if not self.ie.output_table[1 if self.is_true else 0]:
            raise ValueError("Must set output inside block")
        self.ie.status = IfElse.OUT_IF_ELSE_BLOCKS
class IfElse(object):
"""
:api_attr: Static Graph
This class is used to implement IfElse branch control function. IfElse contains two blocks, true_block and false_block. IfElse will put data satisfying True or False conditions into different blocks to run.
Cond is a 2-D Tensor with shape [N, 1] and data type bool, representing the execution conditions of the corresponding part of the input data.
Note:
A new OP :ref:`api_fluid_layers_cond` is highly recommended instead of ``IfElse``. if the shape of parameter ``cond`` is [1].
OP :ref:`api_fluid_layers_cond` is easier to use and is called with less code but does the same thing as ``IfElse`` .
IfElse OP is different from other OPs in usage, which may cause some users confusion. Here is a simple example to illustrate this OP.
.. code-block:: python
# The following code completes the function: subtract 10 from the data greater than 0 in x, add 10 to the data less than 0 in x, and sum all the data.
import numpy as np
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[4, 1], dtype='float32', append_batch_size=False)
y = fluid.layers.data(name='y', shape=[4, 1], dtype='float32', append_batch_size=False)
x_d = np.array([[3], [1], [-2], [-3]]).astype(np.float32)
y_d = np.zeros((4, 1)).astype(np.float32)
# Compare the size of x, y pairs of elements, output cond, cond is shape [4, 1], data type bool 2-D tensor.
# Based on the input data x_d, y_d, it can be inferred that the data in cond are [[true], [true], [false], [false]].
cond = fluid.layers.greater_than(x, y)
# Unlike other common OPs, ie below returned by the OP is an IfElse OP object
ie = fluid.layers.IfElse(cond)
with ie.true_block():
# In this block, according to cond condition, the data corresponding to true dimension in X is obtained and subtracted by 10.
out_1 = ie.input(x)
out_1 = out_1 - 10
ie.output(out_1)
with ie.false_block():
# In this block, according to cond condition, get the data of the corresponding condition in X as false dimension, and add 10
out_1 = ie.input(x)
out_1 = out_1 + 10
ie.output(out_1)
# According to cond condition, the data processed in the two blocks are merged. The output here is output, the type is List, and the element type in List is Variable.
output = ie() # [array([[-7.], [-9.], [ 8.], [ 7.]], dtype=float32)]
# Get the first Variable in the output List and add all elements.
out = fluid.layers.reduce_sum(output[0])
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
res = exe.run(fluid.default_main_program(), feed={"x":x_d, "y":y_d}, fetch_list=[out])
print(res)
# [array([-1.], dtype=float32)]
Args:
cond (Variable): cond is a 2-D Tensor with shape [N, 1] and data type bool, representing the corresponding execution conditions of N input data. The data type is bool.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Unlike other common OPs, the OP call returns an IfElse OP object (e.g. ie in the example), which branches the input data by calling the internal functions of the object ``true_block ()``, ``false_block ()``, ``input ()``, ``output ()``, and integrates the data processed by different branches as the overall output by calling the internal ``call ()`` function. The output type is a list, and the type of each element in the list is Variable.
Internal Functions:
The block is constructed by calling the ``with ie. true_block()`` function in the object, and the computational logic under condition true is put into the block. If no corresponding block is constructed, the input data in the corresponding conditional dimension is unchanged.
The block is constructed by calling the ``with ie. false_block()`` function in the object, and the computational logic under condition false is put into the block. If no corresponding block is constructed, the input data in the corresponding conditional dimension is unchanged.
``Out = ie. input (x)`` will take out the data of the corresponding conditional dimension in X and put it into out, supporting the internal processing of multiple inputs in block.
``ie. output (out)`` writes the result to the output of the corresponding condition.
There is a ``call ()`` function inside the object, that is, by calling ``output = ie ()``, all the outputs inside the block of False are fused as the whole output, the output type is a list, and the type of each element in the list is Variable.
"""
OUT_IF_ELSE_BLOCKS = 0
IN_IF_ELSE_TRUE_BLOCKS = 1
IN_IF_ELSE_FALSE_BLOCKS = 2
def __init__(self, cond, name=None):
    """Create an IfElse over *cond*.

    Args:
        cond (Variable): 2-D bool Tensor of shape [N, 1] selecting, per row,
            which branch processes the corresponding input slice.
        name (str, optional): name prefix for created ops.
    """
    check_type(cond, "cond", Variable, "fluid.layers.IfElse")
    check_type(name, "name", (str, type(None)), "fluid.layers.IfElse")
    self.helper = LayerHelper('ifelse', name=name)
    self.cond = cond
    # id(x) -> (true_slice_var, false_slice_var) cache used by input().
    self.input_table = {}
    self.status = IfElse.OUT_IF_ELSE_BLOCKS
    self.conditional_true_block = ConditionalBlock(inputs=[self.cond])
    self.conditional_false_block = ConditionalBlock(inputs=[self.cond])
    self.output_table = ([], [])  # (true_outs, false_outs)
def input(self, x):
    """Return the slice of *x* that belongs to the current branch.

    On first use of *x*, a ``split_lod_tensor`` op is appended to the parent
    block to split *x* by ``self.cond`` into a true part and a false part;
    both vars are cached so repeated input() calls reuse the same split.

    Args:
        x (Variable): input tensor to split by the condition.

    Returns:
        Variable: the true-branch slice when inside true_block, otherwise
        the false-branch slice.

    Raises:
        ValueError: if called outside a true_block()/false_block() scope.
    """
    if self.status == IfElse.OUT_IF_ELSE_BLOCKS:
        raise ValueError("input must in true/false blocks")
    # Cache keyed by object identity: one split per distinct input tensor.
    if id(x) not in self.input_table:
        parent_block = self._parent_block()
        out_true = parent_block.create_var(
            name=unique_name.generate_with_ignorable_key('ifelse_input' +
                                                         self.helper.name),
            dtype=x.dtype)
        out_false = parent_block.create_var(
            name=unique_name.generate_with_ignorable_key('ifelse_input' +
                                                         self.helper.name),
            dtype=x.dtype)
        # The split op lives in the parent block, outside both branches.
        parent_block.append_op(
            type='split_lod_tensor',
            inputs={
                'X': x,
                'Mask': self.cond,
            },
            outputs={'OutTrue': out_true,
                     'OutFalse': out_false},
            attrs={'level': 0})
        self.input_table[id(x)] = (out_true, out_false)
    else:
        out_true, out_false = self.input_table[id(x)]
    if self.status == IfElse.IN_IF_ELSE_TRUE_BLOCKS:
        return out_true
    else:
        return out_false
def _parent_block(self):
    """Return the program block enclosing the current block."""
    program = self.helper.main_program
    return program.block(program.current_block().parent_idx)
def true_block(self):
    """Return a guard (``with ie.true_block():``) for the condition-true branch."""
    return IfElseBlockGuard(True, self)
def false_block(self):
    """Return a guard (``with ie.false_block():``) for the condition-false branch."""
    return IfElseBlockGuard(False, self)
def output(self, *outs):
    """Register branch outputs for the current (true or false) block.

    Each output is copied into a fresh variable created in the parent
    block and recorded in ``self.output_table`` so that __call__ can later
    merge the two branches.

    Args:
        *outs (Variable): one or more branch-local result tensors.

    Raises:
        ValueError: if called outside a true_block()/false_block() scope.
    """
    if self.status == self.OUT_IF_ELSE_BLOCKS:
        raise ValueError("output can only be invoked in the sub-block")
    # Index 1 collects true-branch outputs, index 0 false-branch outputs.
    out_table = self.output_table[1 if self.status ==
                                  self.IN_IF_ELSE_TRUE_BLOCKS else 0]
    parent_block = self._parent_block()
    for each_out in outs:
        check_type(each_out, "each output", Variable,
                   "fluid.layers.IfElse.output")
        # create outside tensor
        outside_out = parent_block.create_var(
            name=unique_name.generate_with_ignorable_key("_".join(
                [self.helper.name, 'output'])),
            dtype=each_out.dtype)
        out_table.append(outside_out)
        # assign local var to outside
        assign(input=each_out, output=outside_out)
def __call__(self):
if self.status != self.OUT_IF_ELSE_BLOCKS:
raise ValueError("IfElse::__call__ must be out of sub-block")
false_len, true_len = list(map(len, self.output_table))
if false_len == | |
# src/classify.py
import argparse
import csv
import json
import os
import random
import sys
from collections import deque

from rdkit import Chem
"""
Load data from directory, classify given molecule or test file
"""
def walk_ont(sitems, hitemset, hitem, minors, _visited=None):
    """Walk the reverse ontology subgraph rooted at *hitem* upwards, depth first.

    Any member of *hitemset* encountered on the way is added to *minors*;
    callers later discard those, leaving only the most specialized hits.

    Args:
        sitems: mapping node -> iterable of superclass nodes (or missing).
        hitemset: set of candidate hit nodes to test against.
        hitem: node to start the walk from.
        minors: set collecting redundant (less specialized) hits, updated in place.
        _visited: internal guard set; callers should not pass it.
    """
    if _visited is None:
        _visited = set()
    parents = sitems.get(hitem)
    if parents is None:
        return
    for parent in parents:
        # Cycle guard: a malformed ontology with a cycle previously caused
        # unbounded recursion (RecursionError); results for DAGs are unchanged.
        if parent in _visited:
            continue
        _visited.add(parent)
        if parent in hitemset:
            minors.add(parent)
        walk_ont(sitems, hitemset, parent, minors, _visited)
def walk_ont_go(sitems, node, gos):
    """Walk the reverse ontology subgraph upwards from *node*, breadth first.

    Args:
        sitems: mapping node -> iterable of superclass nodes (or missing).
        node: node to start from (checked first).
        gos: mapping node -> GO process identifier.

    Returns:
        The GO process of the nearest node (in BFS order) that has one,
        or None when no reachable node maps to a GO process.
    """
    visited = {node}
    # deque gives O(1) popleft; the old list.pop(0) was O(n) per dequeue.
    queue = deque([node])
    while queue:
        current = queue.popleft()
        go = gos.get(current)
        if go is not None:
            return go
        for parent in sitems.get(current) or ():
            if parent not in visited:
                visited.add(parent)
                queue.append(parent)
    return None
def walk_find(sitems, hitem, citem):
    """Walk upwards from *hitem*; return True iff *citem* is reachable,
    i.e. *citem* is a (transitive) superclass of *hitem*."""
    parents = sitems.get(hitem)
    if parents is None:
        return False
    if citem in parents:
        return True
    return any(walk_find(sitems, parent, citem) for parent in parents)
# Module-level accumulator filled by walk_find_list with the most recent path.
path = []
def walk_find_list(sitems, hitem, item_set):
    """Walk upwards from *hitem* depth first; if a member of *item_set* is
    reached, record the full path to it in the global ``path`` (target first)
    and return True."""
    global path
    parents = sitems.get(hitem)
    if parents is None:
        return False
    for parent in parents:
        if parent is None:
            continue
        if parent in item_set:
            # Target found: note it is stringified, matching the original.
            path.append(str(parent))
            return True
        if walk_find_list(sitems, parent, item_set):
            path.append(parent)
            return True
    return False
def path_to_nproot(hit, nplist):
    """Return the path (list of nodes) from *hit* up to the topmost natural
    product class in *nplist*, or [] when none is reachable.

    Relies on the module-level ``sitems`` ontology and the shared ``path``
    accumulator used by walk_find_list.
    """
    global path
    if hit in nplist:
        return [hit]
    path = []
    if not walk_find_list(sitems, hit, set(nplist)):
        return []
    path.append(hit)
    return path
# Counts how many membership tests is_elem has performed.
ctr = 0
def is_elem(e, s):
    """Membership test that also increments the global call counter ``ctr``."""
    global ctr
    ctr += 1
    return e in s
# Walk ontology downwards, breadth first, starting from given root, add children to set
def walk_collect_npclasses(edges, node, collection):
    """Collect *node* and every descendant reachable through *edges*.

    Args:
        edges: mapping node -> iterable of child nodes (or missing).
        node: root of the downward walk.
        collection: set receiving all visited nodes (updated in place).
    """
    visited = {node}
    # deque gives O(1) popleft; the old list.pop(0) was O(n) per dequeue.
    queue = deque([node])
    while queue:
        current = queue.popleft()
        for child in edges.get(current) or ():
            if child not in visited:
                visited.add(child)
                queue.append(child)
    collection |= visited
#rule A-1
def check_unspec_alkaloid(mol):
    """Rule A-1: unspecified alkaloid.

    Requires a ring nitrogen, then excludes molecules matching patterns for
    peptides, tetrapyrroles, nucleotides and diketopiperazines (each
    excluded class is checked by the SMARTS below).
    """
    #ring-N
    if not mol.HasSubstructMatch(Chem.MolFromSmarts('[#7R]')):
        return False
    #!peptide
    if mol.HasSubstructMatch(Chem.MolFromSmarts('[$([NR0]),$([Nr;!r3;!r4;!r5;!r6;!r7;!r8])]~C(~[OX1H0,OX2H1])')):
        return False
    #!tetrapyrroles
    if mol.HasSubstructMatch(Chem.MolFromSmarts('[#6]1~[#6]~[#6]2~[#6]~[#6]3~[#6]~[#6]~[#6](~[#6]~[#6]4~[#6]~[#6]~[#6](~[#6]~[#6]5~[#6]~[#6]~[#6](~[#6]~[#6]~1~[#7]~2)~[#7]~5)~[#7]~4)~[#7]~3')):
        return False
    if mol.HasSubstructMatch(Chem.MolFromSmarts('[#6]1~[#6]~[#7]~[#6](~[#6R0]~[#6]2~[#6]~[#6]~[#6](~[#6R0]~[#6]3~[#6]~[#6]~[#6](~[#6R0]~[#6]4~[#6]~[#6]~[#6]~[#7]~4)~[#7]~3)~[#7]~2)~[#6]~1')):
        return False
    #!nucleotides
    if mol.HasSubstructMatch(Chem.MolFromSmarts('POCC1CCC(n2cnc3cncnc32)O1')):
        return False
    if mol.HasSubstructMatch(Chem.MolFromSmarts('POCC2CCC([#7R1]1~[#6R1]~[#7R1]~[#6R1]~[#6R1]~[#6R1]~1)O2')):
        return False
    #!diketopiperazine
    if mol.HasSubstructMatch(Chem.MolFromSmarts('[#6]1~[#7]~[#6](~O)~[#6]~[#7]~[#6](~O)~1')):
        return False
    return True
#rule H-1
# Patterns compiled once at import time: SMARTS parsing is pure overhead
# when repeated for every molecule classified.
_HC_ALIPHATIC_C = Chem.MolFromSmarts('C')
_HC_FORBIDDEN = Chem.MolFromSmarts('[c,#7,#8,#16,F,Cl,Br,I,P,As,Se]')
def check_unspec_hydrocarbon(mol):
    """Rule H-1: unspecified (purely aliphatic) hydrocarbon.

    Requires at least one aliphatic carbon and rejects any aromatic carbon,
    N, O, S, halogen, P, As or Se atom.
    """
    if not mol.HasSubstructMatch(_HC_ALIPHATIC_C):
        return False
    if mol.HasSubstructMatch(_HC_FORBIDDEN):
        return False
    return True
def check_acyclic_ketone(mol):
    """Rule F-1: acyclic ketone.

    Requires a C-C(=O)-C motif; rejects ring carbons, aromatic atoms,
    N, S, halogens, P, As, Se, and an O~CC(~O)C~O (dicarbonyl-adjacent) motif.
    """
    # aliphatic
    if not mol.HasSubstructMatch(Chem.MolFromSmarts('CC(=O)C')):
        return False
    if mol.HasSubstructMatch(Chem.MolFromSmarts('[CR,c,#7,o,#16,F,Cl,Br,I,P,As,Se]')):
        return False
    if mol.HasSubstructMatch(Chem.MolFromSmarts('O~CC(~O)C~O')):
        return False
    return True
def check_acyclic_aldehyde(mol):
    """Rule F-2: acyclic aldehyde.

    Requires an O=CH-C motif; rejects ring carbons, aromatic atoms, N, S,
    halogens, P, As, Se, and any oxygen not part of a ketone/aldehyde group.
    """
    if not mol.HasSubstructMatch(Chem.MolFromSmarts('O=[CX3H1]C')):
        return False
    if mol.HasSubstructMatch(Chem.MolFromSmarts('[CR,c,#7,o,#16,F,Cl,Br,I,P,As,Se]')):
        return False
    if mol.HasSubstructMatch(Chem.MolFromSmarts('[$(O);!$(O=C(C)C);!$(O=[CX3H1]C)]')):
        return False
    return True
def check_acyclic_epoxide(mol):
    """Rule F-3: acyclic epoxide.

    Requires a three-membered C-C-O ring; rejects carbons in rings of size
    4 or larger and any aromatic atom, N, S, halogen, P, As or Se.
    """
    if not mol.HasSubstructMatch(Chem.MolFromSmarts('[OX2H0]1[CR1][CR1]1')):
        return False
    if mol.HasSubstructMatch(Chem.MolFromSmarts('[C;r{4-}]')):
        return False
    if mol.HasSubstructMatch(Chem.MolFromSmarts('[c,#7,o,#16,F,Cl,Br,I,P,As,Se]')):
        return False
    return True
def check_acyclic_ether(mol):
    """Rule F-4: acyclic ether.

    Requires a C-O-C linkage whose carbons are not carbonyl/acetal carbons;
    rejects ring carbons, aromatic atoms, N, S, halogens, P, As, Se.
    """
    if not mol.HasSubstructMatch(Chem.MolFromSmarts('O([$(C);!$(C(~O)O)])[$(C);!$(C(~O)O)]')):
        return False
    if mol.HasSubstructMatch(Chem.MolFromSmarts('[CR,c,#7,o,#16,F,Cl,Br,I,P,As,Se]')):
        return False
    return True
def check_acyclic_ester(mol):
    """Rule F-5: acyclic ester.

    Requires a C-C(=O)-O-C motif (the O-bound carbon not itself a carbonyl);
    rejects ring carbons, aromatic atoms, N, S, halogens, P, As, Se.
    """
    if not mol.HasSubstructMatch(Chem.MolFromSmarts('C[CX3](=O)[OX2H0][$(C);!$(C=O)]')):
        return False
    if mol.HasSubstructMatch(Chem.MolFromSmarts('[CR,c,#7,o,#16,F,Cl,Br,I,P,As,Se]')):
        return False
    return True
def check_acyclic_alcohol(mol):
    """Rule F-6: fatty alcohol.

    Requires a C-C-OH motif on non-carbonyl carbons; rejects ring carbons,
    aromatic atoms, N, S, halogens, P, As, Se, and a diol-adjacent
    OCC(O)C~O motif.
    """
    if not mol.HasSubstructMatch(Chem.MolFromSmarts('[$(C);!$(C~O)]~[$(C);!$(C=O)][OX2H1]')):
        return False
    if mol.HasSubstructMatch(Chem.MolFromSmarts('[CR,c,#7,o,#16,F,Cl,Br,I,P,As,Se]')):
        return False
    if mol.HasSubstructMatch(Chem.MolFromSmarts('OCC(O)C~O')):
        return False
    return True
def check_acyclic_peroxide(mol):
    """Rule F-7: acyclic peroxide.

    Requires an O-O linkage; rejects ring carbons, aromatic atoms, N, S,
    halogens, P, As, Se.
    """
    if not mol.HasSubstructMatch(Chem.MolFromSmarts('[OX2][OX2]')):
        return False
    if mol.HasSubstructMatch(Chem.MolFromSmarts('[CR,c,#7,o,#16,F,Cl,Br,I,P,As,Se]')):
        return False
    return True
def check_fatty_acid_anion(mol):
    """Rule F-8: fatty acid anion.

    Requires a carboxylate group (C(=O)[O-]) plus a chain of at least four
    carbons; rejects ring carbons, aromatic atoms, N, S, P, As.
    """
    if not mol.HasSubstructMatch(Chem.MolFromSmarts('[CX3](=O)[OX1H0-]')):
        return False
    if not mol.HasSubstructMatch(Chem.MolFromSmarts('C~C~C~C')):
        return False
    if mol.HasSubstructMatch(Chem.MolFromSmarts('[CR,c,#7,o,S,P,As]')):
        return False
    return True
def check_fatty_acid(mol):
    """Rule F-9: fatty acid.

    Requires a carboxylic acid group (C(=O)OH) plus a chain of at least
    four carbons; rejects ring carbons, aromatic atoms, N, S, P, As.
    """
    if not mol.HasSubstructMatch(Chem.MolFromSmarts('[CX3](=O)[OX2H1]')):
        return False
    if not mol.HasSubstructMatch(Chem.MolFromSmarts('C~C~C~C')):
        return False
    if mol.HasSubstructMatch(Chem.MolFromSmarts('[CR,c,#7,o,S,P,As]')):
        return False
    return True
#rule M-1
def check_unspec_macrolide(mol):
    """Rule M-1: unspecified macrolide.

    Requires a lactone inside a ring of at least 8 atoms, then excludes
    macrocyclic benzenes, cyclopeptides and cyclodepsipeptides via the
    SMARTS below.
    """
    #8+ macro lactone
    if not mol.HasSubstructMatch(Chem.MolFromSmarts('[OR1;r{8-}][CR1;r{8-}](=O)')):
        return False
    #!macrocyclic benzene
    if mol.HasSubstructMatch(Chem.MolFromSmarts('[cR]([C,O;r{9-}])1[cR]([#6,O])[cR1][cR1][cR1][cR1]1')):
        return False
    if mol.HasSubstructMatch(Chem.MolFromSmarts('[cR]([C,O;r{9-}])1[cR][cR]([#6,O])[cR1][cR1][cR1]1')):
        return False
    #!cyclopeptide
    if mol.HasSubstructMatch(Chem.MolFromSmarts('[NR]~[CR](~[OX1H0,OX2H1])[CR][NR;r{7-}]~[CR](~[OX1H0,OX2H1])')):
        return False
    #!cyclodepsipeptide
    if mol.HasSubstructMatch(Chem.MolFromSmarts('[CR](=O)[NR][CR][CR](=O)[OR][CR][CR](=O)[NR][CR][CR](=O)[OR]')):
        return False
    return True
# Try to match ALL patterns. Remove redundant. Return remaining.
def get_hits(mol, silent=False):
    """Match *mol* against every known class and return the surviving hits.

    Matching proceeds in three stages: exact / connectivity-block InChIKey
    lookup, substructure matching against each class's SMILES/SMARTS
    pattern(s), and the hard-coded structural rules (A-1, H-1, F-1..F-9,
    M-1). Finally, hits that are superclasses of other hits are purged so
    only the most specialized classes remain.

    Uses the module-level tables iks, ik1s, gos, labels, sitems, smiles,
    smarts and the parsed ``args`` options.

    Args:
        mol: RDKit molecule to classify.
        silent: suppress progress messages when True.

    Returns:
        dict mapping class item id -> (item id, label, matched pattern/key).
    """
    if not silent:
        print('collecting hits...')
    hits = {}
    # match the InChI keys
    ik = Chem.MolToInchiKey(mol)
    it = iks.get(ik)
    if it is not None:
        go = gos.get(it) or ''
        hits[it] = (it, labels.get(it), ik)
        if args.rawhits:
            print('{} {} {} {}'.format(it, labels.get(it), ik, go))
    else:
        # Fall back to the 14-char connectivity block of the InChIKey.
        it = ik1s.get(ik[:14])
        if it is not None:
            go = gos.get(it) or ''
            hits[it] = (it, labels.get(it), ik[:14])
            if args.rawhits:
                print('{} {} {} {}'.format(it, labels.get(it), ik[:14], go))
    ps_default = Chem.AdjustQueryParameters()
    ps_default.adjustDegreeFlags = Chem.AdjustQueryWhichFlags.ADJUST_IGNOREDUMMIES | Chem.AdjustQueryWhichFlags.ADJUST_IGNORECHAINS
    ps_ignoreDummies = Chem.AdjustQueryParameters()
    ps_ignoreDummies.adjustDegreeFlags = Chem.AdjustQueryWhichFlags.ADJUST_IGNOREDUMMIES
    for it, _ in sitems.items():
        sm = smiles.get(it)
        smas = smarts.get(it)
        if sm is None and smas is None:
            continue
        if (not (sm is None or len(sm) == 0)) and sm.find('*') < 0:
            # This class has a non-wildcard SMILES; skip stereo-specific ones.
            if sm.find('@') >= 0:
                continue
            try:
                q = Chem.MolFromSmiles(sm)
                pats = [Chem.AdjustQueryProperties(q, ps_ignoreDummies)]
            except Exception:
                continue
        elif smas is not None:
            # This class has a wildcard SMILES or an explicit SMARTS list.
            try:
                pats = [Chem.MolFromSmarts(sma) for sma in smas]
            except Exception:
                continue
        else:
            # Wildcard SMILES with no SMARTS alternative.
            try:
                q = Chem.MolFromSmiles(sm)
                pats = [Chem.AdjustQueryProperties(q, ps_ignoreDummies)]
            except Exception:
                continue
        # BUGFIX: the old code tested `pats is None` (never true: all three
        # branches build a list) and its diagnostic print referenced `sma`,
        # a name that does not exist in this scope (NameError). What can
        # happen is MolFromSmarts/MolFromSmiles returning None *elements*,
        # which would crash HasSubstructMatch below — drop them instead.
        if any(p is None for p in pats):
            print('========defect pattern: {} {}'.format(
                it, smas if smas is not None else sm))
            pats = [p for p in pats if p is not None]
        for p in pats:
            if mol.HasSubstructMatch(p):
                go = gos.get(it) or ''
                # NOTE: `sm` may be None when matched via SMARTS; preserved
                # from the original behavior.
                hits[it] = (it, labels.get(it), sm)
                if args.rawhits:
                    print('{} {} {} {}'.format(it, labels.get(it), sm, go))
                break
    # Structural fallback rules.
    if check_unspec_hydrocarbon(mol):
        hits['Q109910560'] = ('Q109910560', 'unspecified hydrocarbon', 'Rule H-1')
    else:
        if check_fatty_acid_anion(mol):
            hits['Q71653081'] = ('Q71653081', 'fatty acid anion', 'Rule F-8')
        if check_fatty_acid(mol):
            hits['Q61476'] = ('Q61476', 'fatty acid', 'Rule F-9')
        if check_acyclic_aldehyde(mol):
            hits['Q109923365'] = ('Q109923365', 'acyclic aldehyde', 'Rule F-2')
        if check_acyclic_ether(mol):
            hits['Q109923862'] = ('Q109923862', 'acyclic ether', 'Rule F-4')
        if check_acyclic_epoxide(mol):
            hits['Q109923685'] = ('Q109923685', 'acyclic epoxide', 'Rule F-3')
        if check_acyclic_ketone(mol):
            hits['Q109911294'] = ('Q109911294', 'acyclic ketone', 'Rule F-1')
        if check_acyclic_ester(mol):
            hits['Q109923912'] = ('Q109923912', 'acyclic ester', 'Rule F-5')
        if check_acyclic_alcohol(mol):
            hits['Q378871'] = ('Q378871', 'fatty alcohol', 'Rule F-6')
        if check_acyclic_peroxide(mol):
            hits['Q109946855'] = ('Q109946855', 'acyclic peroxide', 'Rule F-7')
    if check_unspec_alkaloid(mol):
        hits['Q70702'] = ('Q70702', 'unspecified alkaloid', 'Rule A-1')
    if check_unspec_macrolide(mol):
        hits['Q422687'] = ('Q422687', 'unspecified macrolide', 'Rule M-1')
    if not silent:
        print('purging redundant hits')
    # Drop every hit that is a superclass of another hit.
    hitemset = set(hits.keys())
    minors = set()
    for hit in hitemset:
        walk_ont(sitems, hitemset, hit, minors)
    for m in minors:
        hits.pop(m)
    return hits
# Initiate the parser
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--data", help="data directory", required=True)
parser.add_argument("-m", "--molecule", help="molecule to classify (SMILES/InChi)")
parser.add_argument("-a", "--rawhits", help="output all hits before hit processing starts",
                    action="store_true")
parser.add_argument("-t", "--testfile", help="classify all InChis from file, first line may contain check item", type=str)
parser.add_argument("-n", "--nptest", help="Load N random NP Inchis from WD and classify", action="store_true")
parser.add_argument("-j", "--json", help="together with -m outputs JSON formatted result", action="store_true")
parser.add_argument("-N", "--natural", help="prune ontology to only include natural products", action="store_true")
parser.add_argument("--list_nproots", help="list of topmost natural products classes", action="store_true")
# Read arguments from the command line
args = parser.parse_args()
# A "normal run" classifies something (-m, -t or -n); "extra service" only
# prints ontology information (--list_nproots).
is_normal_run = not (args.molecule is None) or not(args.testfile is None) or args.nptest
is_extra_service = args.list_nproots
if not is_normal_run and not is_extra_service:
    print('One of -m or -t or -n is needed')
    parser.print_usage()
    exit()
# JSON output mode implies no progress chatter on stdout.
silent = args.json
if not silent:
    print(args)
# Single-molecule mode: parse the -m argument as InChI or SMILES.
# NOTE(review): mol is not checked for None here — a malformed -m argument
# presumably fails later; confirm downstream handling.
if is_normal_run and args.testfile is None and args.nptest is False:
    if args.molecule.find('InChI=') >= 0:
        mol = Chem.MolFromInchi(args.molecule)
    else:
        mol = Chem.MolFromSmiles(args.molecule)
# Global lookup tables filled while reading the data files below:
# iks: full InChIKey -> item, ik1s: connectivity layer -> item,
# labels/smiles/smarts: item -> label / SMILES / SMARTS patterns.
iks = {}
ik1s = {}
labels = {}
smiles = {}
smarts = {}
# read bio process data
with open(args.data + 'data-biosyn-classes.json', 'r') as f:
if not silent:
print('reading biosyn class data')
s = f.read()
jol = json.loads(s)
gos = {}
for d in jol:
dd = d.get('item')
it = dd.get('value')
ik = d.get('p235')
if ik is not None:
iks[ik] = it
if ik[15:25] == 'UHFFFAOYSA':
ik1s[ik[:14]] = it
else:
sm = None
p233 = None
p2017 = None
p233 = d.get('p233')
p2017 = d.get('p2017')
if p2017 is not None and len(p2017) > 0:
sm = p2017
elif p233 is not None and len(p233) > 0:
sm = p233
if sm is not None:
smiles[it] = sm
p8533 = d.get('p8533')
if p8533 is not None and | |
"""
Решения задач пробного раунда онлайн-чемпионата по программированию Yandex Сup в категории Алгоритм.
https://contest.yandex.ru/yacup/
"""
from time import time
def A():
    """Lottery checker (problem A).

    The organisers draw 10 distinct numbers from 1..32; each ticket holds 6
    distinct numbers and wins when at least 3 of them were drawn.  Reads the
    draw and the tickets from input.txt and writes Lucky/Unlucky per ticket
    to output.txt.

    Status: accepted.
    """
    with open('input.txt', 'r') as f_in, open('output.txt', 'w') as f_out:
        drawn = set(f_in.readline().split())
        ticket_count = int(f_in.readline())
        for _ in range(ticket_count):
            numbers = set(f_in.readline().split())
            verdict = 'Lucky' if len(numbers & drawn) >= 3 else 'Unlucky'
            f_out.write(verdict + '\n')
def B():
    """Shortest palindromic substring (problem B).

    For the input string, find the shortest substring of length >= 2 that is
    a palindrome; among equally short ones pick the lexicographically
    smallest.  Writes '-1' when none exists.

    Status: accepted.
    """
    with open('input.txt', 'r') as f_in, open('output.txt', 'w') as f_out:
        text = f_in.read()
        # Any palindrome longer than 3 contains a shorter palindrome inside,
        # so only lengths 2 and 3 need to be examined.
        pairs = [x + y for x, y in zip(text, text[1:]) if x == y]
        if pairs:
            answer = min(pairs)
        else:
            triples = [x + y + z
                       for x, y, z in zip(text, text[1:], text[2:]) if x == z]
            answer = min(triples) if triples else '-1'
        f_out.write(answer)
def C():
    """Line bisecting all figures (problem C).

    Given n figures (circles and rectangles) on the plane, decide whether a
    single straight line can split every figure into two parts of equal
    area.  Writes Yes/No to output.txt.

    Status: accepted.
    """
    def det(p, q, r):
        """Cross product of vectors p->q and p->r (zero iff collinear)."""
        ux, uy = q[0] - p[0], q[1] - p[1]
        vx, vy = r[0] - p[0], r[1] - p[1]
        return ux * vy - uy * vx

    with open('input.txt', 'r') as f_in, open('output.txt', 'w') as f_out:
        figure_count = int(f_in.readline())
        anchors = []
        answer = 'Yes'
        for _ in range(figure_count):
            kind, *nums = map(int, f_in.readline().split())
            # A line halves a circle or a rectangle exactly when it passes
            # through its centre; doubling the coordinates keeps everything
            # in integers.
            if kind == 0:  # circle
                centre = nums[1] * 2, nums[2] * 2
            else:  # rectangle
                centre = nums[0] + nums[4], nums[1] + nums[5]
            if not anchors:
                anchors.append(centre)
            elif len(anchors) < 2 and anchors[0] != centre:
                # Two coinciding points would not constrain the line: any
                # line through two (distinct) points always exists.
                anchors.append(centre)
            elif len(anchors) == 2 and det(centre, *anchors) != 0:
                # Three points are collinear iff their cross product is zero.
                answer = 'No'
                break
        f_out.write(answer)
def D():
    """Maximum-sum monotone grid path (problem D).

    Given digit arrays a (length n) and b (length m), the implicit grid is
    c[i][j] = a[i]*10^9 + b[j].  Among all down/right paths from c[0][0] to
    c[n-1][m-1], output the maximal sum of visited cells.

    Status: accepted.
    """
    with open('input.txt', 'r') as f_in, open('output.txt', 'w') as f_out:
        n, m = map(int, f_in.readline().split())
        a = [int(t) for t in f_in.readline().split()]
        b = [int(t) for t in f_in.readline().split()]
        # Every path visits the same number of cells, and one a-digit
        # outweighs the entire b array, so: traverse each column while on
        # the first/last maximal row of a, and cross the remaining rows on
        # the maximal element of b.
        best_a, best_b = max(a), max(b)
        lead = a.index(best_a)
        trail = a[::-1].index(best_a)
        total_a = sum(a) + (m - 1) * best_a
        total_b = (sum(b) + lead * b[0]
                   + (n - lead - trail - 1) * best_b + trail * b[-1])
        if total_a == 0:
            f_out.write(f'{total_b}')
        else:
            # b-part is zero-padded to 9 digits under the a-part.
            f_out.write(f'{total_a}{total_b:0>9n}')
def E():
    """Two storage nodes minimising network unreliability (problem E).

    The network is a tree (n nodes, n-1 cables).  Unreliability is the
    maximum, over all nodes, of the distance to the nearest storage node.
    Strategy: find the diameter (longest path) with two DFS passes, split it
    in half, and take the centre of each half.

    Fix: removed debug timing (`t = time()` / `print(time() - t)`) from
    longest_branch() — it polluted stdout on every call.

    Status: passes a subset of tests, exceeds the time limit on large ones.
    """
    from collections import defaultdict

    def longest_branch(graph, start, visited=None):
        """Iterative DFS returning the longest simple path from `start`
        that avoids the nodes already present in `visited`."""
        if visited is None:
            visited = set()
        queue = [(start, 1), ]
        branch = []
        res = []
        while queue:
            root, lvl = queue.pop()
            queue.extend((n, lvl+1) for n in graph[root] if n not in visited)
            if len(branch) >= lvl:
                # Backtracking: remember the best completed branch so far.
                if len(branch) > len(res):
                    res = branch.copy()
                while len(branch) >= lvl:
                    visited.remove(branch.pop())
            branch.append(root)
            visited.add(root)
        if len(branch) > len(res):
            return branch
        else:
            return res

    with open('input.txt', 'r') as f_in, open('output.txt', 'w') as f_out:
        n = int(f_in.readline())
        if n <= 3:
            # With at most 3 nodes any two of them give unreliability <= 1.
            f_out.write('1 2')
            return
        net = defaultdict(list)  # node label -> adjacent node labels
        # A connected graph with n nodes and n-1 edges is a tree.
        for _ in range(n-1):
            a, b = f_in.readline().split()
            net[a].append(b)
            net[b].append(a)
        # The diameter of a tree is found with two DFS passes.
        longest_path = longest_branch(net, start='1')
        longest_path = longest_branch(net, start=longest_path[-1])
        # For a single storage node we would take the centre of the diameter;
        # with two storages do the same on each half of the diameter.
        n = len(longest_path) - 1
        split = n // 2
        # Left half and its centre.
        lp = longest_branch(net, start=longest_path[0], visited={longest_path[n-split+1], })
        split_left = lp[(len(lp) - 1) // 2]
        # Right half and its centre.
        lp = longest_branch(net, start=longest_path[-1], visited={longest_path[split-1], })
        split_right = lp[(len(lp) - 1) // 2]
        if split_left == split_right:
            # In a symmetric graph the central node may be picked twice.
            split_right = lp[(len(lp) - 1) // 2 + 1]
        f_out.write(f'{split_left} {split_right}')
def E_test_chain():
    """Benchmark E() on a 200000-edge chain graph (nodes i -- i+1)."""
    N = 200000
    with open('input.txt', 'w') as f:
        f.write(f'{N}\n')
        f.writelines(f'{i} {i+1}\n' for i in range(1, N+1))
    started = time()
    E()
    print(time() - started)
def E_test_tree():
    """Benchmark E() on a 200000-edge binary-heap-shaped tree (i -- i//2)."""
    N = 200000
    with open('input.txt', 'w') as f:
        f.write(f'{N}\n')
        f.writelines(f'{i} {i // 2}\n' for i in range(2, N+1))
    started = time()
    E()
    print(time() - started)
def F():
"""
Дан белый клетчатый листок размером n×m клеток. Сначала Альфред раскрашивает
некоторые клетки в чёрный цвет, после чего Георг должен снова перекрасить все
клетки в белый. Для этого Георг сколько угодно раз проделывает следующую операцию:
- Поместить карандаш в левый нижний угол листка, после чего нарисовать путь
в верхний правый угол листка. Путь должен проходить только по границам клеток,
кроме того, передвигать карандаш можно только вправо и вверх.
- Перекрасить все клетки листка, расположенные под нарисованным путём, в противоположный цвет.
За какое минимальное число операций Георг сможет перекрасить лист в белый?
Сатус: проходит часть тестов, не проходит по времени
"""
with open('input.txt', 'r') as f_in, open('output.txt', 'w') as f_out:
n, m, k = map(int, f_in.readline().split())
if k == 0:
f_out.write('0')
return
# таблицу представим в виде списка закрашенных полей в каждой строке
rows = [[n+1, ] for _ in range(m)]
for _ in range(k):
x, y = map(int, f_in.readline().split())
# для удобства развернем ось х
x = n - 1 - x
rows[y].append(x)
# упорядочим в строках
for v in rows:
v.sort(reverse=True)
# цвет в правом нижнем уголу
color = rows[0][-1] == 0 # True - черный, False - белый
full_rows = 0
positions = [0] * (m+1)
positions[-1] = n + 1 # буквально -1ая строка
iteration = 0
# выигрыщная стратегия для Георга - жадная, идти с правого нижнего угла
# до левого верхнего и последовательно перекрашивать цвета
while full_rows < m:
for j in range(full_rows, m):
row = rows[j]
if color:
# слопываем закрашенные ячейки в данной строке
while positions[j] == row[-1] and positions[j] < positions[j-1]:
positions[j] = row.pop() + 1
else:
# перемещаемся до ближайшей закрашенной ячейке
if row[-1] > positions[j-1]:
positions[j] = positions[j - 1]
else:
positions[j] = row[-1]
if positions[j] == n + 1:
full_rows = j + 1
elif positions[j] == 0:
break
color = not | |
<reponame>alecomunian/geone<filename>geone/customcolors.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Python module: 'customcolors.py'
author: <NAME>
date: jan-2018
Definition of custom colors and colormap.
"""
import numpy as np
import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
from mpl_toolkits import axes_grid1
# ----------------------------------------------------------------------------
def add_colorbar(im, aspect=20, pad_fraction=1.0, **kwargs):
    """
    Add a vertical color bar to an image plot.

    The bar is appended to the right of the image axes; its width is the
    axes height divided by `aspect`, separated by `pad_fraction` of that
    width.  Extra keyword arguments are forwarded to figure.colorbar().
    (from: http://nbviewer.jupyter.org/github/mgeier/python-audio/blob/master/plotting/matplotlib-colorbar.ipynb)
    """
    image_axes = im.axes
    divider = axes_grid1.make_axes_locatable(image_axes)
    bar_width = axes_grid1.axes_size.AxesY(image_axes, aspect=1./aspect)
    bar_pad = axes_grid1.axes_size.Fraction(pad_fraction, bar_width)
    previous_ax = plt.gca()
    cax = divider.append_axes("right", size=bar_width, pad=bar_pad)
    # append_axes makes the new axes current; restore the caller's axes.
    plt.sca(previous_ax)
    return image_axes.figure.colorbar(im, cax=cax, **kwargs)
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
def custom_cmap(cseq,
                vseq=None,
                ncol=256,
                cunder=None,
                cover=None,
                cbad=None,
                alpha=1.0,
                cmap_name='custom_cmap'):
    """
    Defines a custom colormap given colors at transition values:

    :param cseq:    (list) colors given by string or rgb-tuples
    :param vseq:    (list) increasing values of same length as cseq, values
                        corresponding to the color of cseq in the colormap,
                        default: None: equally spaced values are used
    :param ncol:    (int) number of colors for the colormap
    :param cunder:  (string or rgb-tuple or rgba-tuple) color for 'under' values
    :param cover:   (string or rgb-tuple or rgba-tuple) color for 'over' values
    :param cbad:    (string or rgb-tuple or rgba-tuple) color for 'bad' values
    :param alpha:   (float or list of floats) values of alpha channel for
                        transparency, for each color in cseq (if a single float
                        is given, the same value is used for each color)
    :param cmap_name: (string) colormap name

    :return: (LinearSegmentedColormap) colormap, or None on invalid input
        (length mismatch or non-increasing vseq; an error is printed)
    """
    # Set alpha sequence: one alpha value per color in cseq.
    aseq = np.asarray(alpha, dtype=float) # numpy.ndarray (possibly 0-dimensional)
    if aseq.size == 1:
        aseq = aseq.flat[0] * np.ones(len(cseq))
    elif aseq.size != len(cseq):
        print('ERROR: length of alpha not compatible with cseq')
        return None

    # Set vseqn: sequence of values rescaled in [0,1].
    if vseq is not None:
        if len(vseq) != len(cseq):
            print("ERROR: length of vseq and cseq differs")
            return None
        # np.any instead of sum(...) makes the monotonicity check explicit.
        if np.any(np.diff(vseq) <= 0.0):
            print("ERROR: vseq is not an increasing sequence")
            return None
        # Linearly rescale vseq on [0,1]
        vseqn = (np.array(vseq, dtype=float) - vseq[0]) / (vseq[-1] - vseq[0])
    else:
        vseqn = np.linspace(0, 1, len(cseq))

    # Set cseqRGB: sequence of colors as RGB-tuples.  Values the converter
    # rejects are passed through unchanged (they may already be rgb tuples).
    cseqRGB = []
    for c in cseq:
        try:
            cseqRGB.append(mcolors.ColorConverter().to_rgb(c))
        except Exception:  # narrowed from bare except: keep Ctrl-C working
            cseqRGB.append(c)

    # Dictionary defining the piecewise-linear channels of the colormap.
    cdict = {
        'red'  :[(vseqn[i], cseqRGB[i][0], cseqRGB[i][0]) for i in range(len(cseq))],
        'green':[(vseqn[i], cseqRGB[i][1], cseqRGB[i][1]) for i in range(len(cseq))],
        'blue' :[(vseqn[i], cseqRGB[i][2], cseqRGB[i][2]) for i in range(len(cseq))],
        'alpha':[(vseqn[i], aseq[i], aseq[i]) for i in range(len(cseq))]
        }

    cmap = mcolors.LinearSegmentedColormap(cmap_name, cdict, N=ncol)

    def _apply_special_color(setter, color):
        """Apply a set_under/set_over/set_bad color, falling back to the raw
        value when it cannot be converted to rgba."""
        try:
            setter(mcolors.ColorConverter().to_rgba(color))
        except Exception:
            setter(color)

    if cunder is not None:
        _apply_special_color(cmap.set_under, cunder)
    if cover is not None:
        _apply_special_color(cmap.set_over, cover)
    if cbad is not None:
        _apply_special_color(cmap.set_bad, cbad)

    return cmap
# ----------------------------------------------------------------------------
# Some colors and colormaps
# =========================
# Chart color from libreoffice (<NAME>)
# Each entry is an rgb triple rescaled from 0-255 to 0.0-1.0.
col_chart01 = [x/255. for x in ( 0, 69, 134)] # dark blue
col_chart02 = [x/255. for x in (255, 66, 14)] # orange
col_chart03 = [x/255. for x in (255, 211, 32)] # yellow
col_chart04 = [x/255. for x in ( 87, 157, 28)] # green
col_chart05 = [x/255. for x in (126, 0, 33)] # dark red
col_chart06 = [x/255. for x in (131, 202, 255)] # light blue
col_chart07 = [x/255. for x in ( 49, 64, 4)] # dark green
col_chart08 = [x/255. for x in (174, 207, 0)] # light green
col_chart09 = [x/255. for x in ( 75, 31, 111)] # purple
col_chart10 = [x/255. for x in (255, 149, 14)] # dark yellow
col_chart11 = [x/255. for x in (197, 0, 11)] # red
col_chart12 = [x/255. for x in ( 0, 132, 209)] # blue
# ... other names (readable aliases for the numbered chart colors above)
col_chart_purple = col_chart09
col_chart_darkblue = col_chart01
col_chart_blue = col_chart12
col_chart_lightblue = col_chart06
col_chart_green = col_chart04
col_chart_darkgreen = col_chart07
col_chart_lightgreen = col_chart08
col_chart_yellow = col_chart03
col_chart_darkyellow = col_chart10
col_chart_orange = col_chart02
col_chart_red = col_chart11
col_chart_darkred = col_chart05
# ... list
col_chart_list = [col_chart01, col_chart02, col_chart03, col_chart04,
                  col_chart05, col_chart06, col_chart07, col_chart08,
                  col_chart09, col_chart10, col_chart11, col_chart12]
# ... list reordered
col_chart_list_s = [col_chart_list[i] for i in (8, 0, 11, 5, 3, 6, 7, 2, 9, 1, 10, 4)]
# Default color for bad value (nan)
cbad_def = (.9, .9, .9, 0.5)
# colormaps
# ... default color map
cbad1 = (.9, .9, .9, 0.5)
# cunder1 = [x/255. for x in (160, 40, 160)] + [0.5] # +[0.5] ... for appending alpha channel
# cover1 = [x/255. for x in (250, 80, 120)] + [0.5] # +[0.5] ... for appending alpha channel
cunder1 = [x/255. for x in (200, 10, 250)] + [0.5] # +[0.5] ... for appending alpha channel
cover1 = [x/255. for x in (250, 10, 10)] + [0.5] # +[0.5] ... for appending alpha channel
cmaplist1 = ([x/255. for x in (160, 40, 240)],
             [x/255. for x in ( 0, 240, 240)],
             [x/255. for x in (240, 240, 0)],
             [x/255. for x in (180, 10, 10)])
cmap1 = custom_cmap(cmaplist1, cunder=cunder1, cover=cover1, cbad=cbad1, alpha=1.0)
cmap2 = custom_cmap(['purple', 'blue', 'cyan', 'yellow', 'red', 'black'],
                    cunder=cbad_def, cover=cbad_def, cbad=cbad_def, alpha=1.0)
# Grayscale ramps (white-to-black / black-to-white); under=translucent blue,
# over=translucent red, bad=translucent chart yellow.
cmapW2B = custom_cmap(['white', 'black'], cunder=(0.0, 0.0, 1.0, 0.5), cover=(1.0, 0.0, 0.0, 0.5), cbad=col_chart_yellow+[0.5], alpha=1.0)
cmapB2W = custom_cmap(['black', 'white'], cunder=(0.0, 0.0, 1.0, 0.5), cover=(1.0, 0.0, 0.0, 0.5), cbad=col_chart_yellow+[0.5], alpha=1.0)
# # Notes:
# # =====
# # To use some colormaps (LinearSegmentedColormap) from matplotlib
# cm_name = [name for name in plt.cm.datad.keys()] # name of the colormaps
# cmap = plt.get_cmap('ocean') # get the colormap named 'ocean' (cm_name[105])
# # To get current rcParams (matplotlib)
# import matplotlib as mpl
# mpl.rcParams
# # Useful function to convert color from/to rgb[a]/hex:
# mcolors.to_rgb()
# mcolors.to_rgba()
# mcolors.to_hex()
# mcolors.to_hex(,keep_alpha=True)
#
# To customize existing colormap from matplotlib:
# Example, add specific colors for under values, over values and bad values in map 'terrain' with nn colors
#cmap_new_terrain = custom_cmap([plt.get_cmap('terrain')(x) for x in np.linspace(0,1,nn)], ncol=nn, cunder='pink', cover='orange', cbad='red')
cmap_new_terrain = custom_cmap([plt.get_cmap('terrain')(x) for x in np.linspace(0,1,256)], ncol=256, cunder='pink', cover='orange', cbad='red')
#####cmap_details = ccol.custom_cmap(ccol.cmaplist1, alpha=np.linspace(0, 1, len(ccol.cmaplist1), cunder=ccol.cunder1, cover=ccol.cover1, cbad=ccol.cbad1)
# ----------------------------------------------------------------------------
if __name__ == "__main__":
print("Module 'geone.customcolors' example:")
import matplotlib.pyplot as plt
# Plot a function of two variable in a given domain
# Set domain and number of cell in each direction
xmin, xmax = -2, 2
ymin, ymax = -1, 2
nx, ny = 200, 150
# Set the cell size
sx, sy = (xmax-xmin)/nx, (ymax-ymin)/ny
# Set the meshgrid
x, y = xmin + 0.5 * sx + sx * np.arange(nx), ymin + 0.5 * sy + sy * np.arange(ny)
# # equivalently:
# x, y = np.arange(xmin+sx/2, xmax, sx), np.arange(ymin+sy/2, ymax ,sy)
# x, y = np.linspace(xmin+sx/2, xmax-sx/2, nx), np.linspace(ymin+sy/2, ymax-sy/2, ny)
xx,yy = np.meshgrid(x,y)
# Set the function values
zz = xx**2 + yy**2 - 2
zz[np.where(zz < -1.7)] = np.nan
# Specify some points in the grid
px = np.arange(xmin,xmax,.1)
py = np.zeros(len(px))
pz = px**2 + py**2 - 2
pz[np.where(pz < -1.7)] = np.nan
# Set min and max value to be displayed
vmin, vmax = -1.0, 3.0
# Create a custom colormap
my_cmap = custom_cmap(('blue', 'white', 'red'), vseq=(vmin,0,vmax),
cunder='cyan', cover='violet', cbad='gray', alpha=.3)
# Display
fig, ax = plt.subplots(2,2,figsize=(16,10))
# --- 1st plot ---
cax = ax[0,0]
im_plot = cax.imshow(zz, cmap=cmap1, vmin=vmin, vmax=vmax, origin='lower',
extent=[xmin, xmax, ymin, ymax], interpolation='none')
cax.set_xlim(xmin, xmax)
cax.set_ylim(ymin, ymax)
cax.grid()
cax.set_xlabel("x-axis")
cax.set_xticks([-1, 1])
cax.set_xticklabels(['x=-1', 'x=1'], fontsize=8)
# cbarShrink = 0.9
# plt.colorbar(extend='both', shrink=cbarShrink, aspect=20*cbarShrink)
add_colorbar(im_plot, extend='both')
# add points
col = [0 for i in range(len(pz))]
for i in range(len(pz)):
if ~ np.isnan(pz[i]):
col[i] = cmap1((pz[i]-vmin)/(vmax-vmin))
else:
col[i] = mcolors.ColorConverter().to_rgba(cbad1)
cax.scatter(px, py, marker='o', s=50, edgecolor='black', color=col)
cax.set_title('colormap: cmap1')
# --- 2nd plot ---
plt.subplot(2,2,2)
im_plot = plt.imshow(zz,cmap=my_cmap, vmin=vmin, vmax=vmax, origin='lower',
extent=[xmin, xmax, ymin, ymax], interpolation='none')
plt.grid()
cbar = add_colorbar(im_plot, ticks=[vmin,vmax])
# cbar.ax.set_yticklabels(cbar.get_ticks(), fontsize=16)
cbar.set_ticks([vmin, 0, vmax])
cbar.ax.set_yticklabels(["min={}".format(vmin), 0, "max={}".format(vmax)],
fontsize=16)
col = [0 for i in range(len(pz))]
for i in range(len(pz)):
if not np.isnan(pz[i]):
col[i] = my_cmap((pz[i]-vmin)/(vmax-vmin))
else:
col[i] = mcolors.ColorConverter().to_rgba('gray')
# add points
plt.scatter(px, py, marker='o', s=50, edgecolor='black', color=col)
plt.title('colormap: my_cmap')
# --- 3rd plot ---
plt.subplot(2,2,3)
im_plot = plt.imshow(zz, cmap=custom_cmap(col_chart_list,
ncol=len(col_chart_list)),
vmin=vmin, vmax=vmax, origin='lower',
extent=[xmin,xmax,ymin,ymax], interpolation='none')
plt.grid()
# plt.colorbar(shrink=cbarShrink, aspect=20*cbarShrink)
add_colorbar(im_plot)
#col = custom_cmap(col_chart_list, ncol=len(col_chart_list))((pz-vmin)/(vmax-vmin))
col = [0 for i in range(len(pz))]
for i in range(len(pz)):
if ~ np.isnan(pz[i]):
col[i] = custom_cmap(col_chart_list, ncol=len(col_chart_list))((pz[i]-vmin)/(vmax-vmin))
else:
col[i] = mcolors.ColorConverter().to_rgba('white')
plt.scatter(px, py, marker='o', s=50, edgecolor='black', color=col)
plt.title('colormap: col_chart_list')
# --- 4th plot ---
plt.subplot(2,2,4)
im_plot = plt.imshow(zz, cmap=custom_cmap(col_chart_list_s,
ncol=len(col_chart_list_s)),
vmin=vmin, vmax=vmax, origin='lower',
extent=[xmin, xmax, ymin, ymax], interpolation='none')
plt.grid()
# plt.colorbar(shrink=cbarShrink, aspect=20*cbarShrink)
add_colorbar(im_plot)
#col = custom_cmap(col_chart_list_s, ncol=len(col_chart_list_s))((pz-vmin)/(vmax-vmin))
| |
<reponame>Mikehem/tfx<filename>tensorflow_transform/tf_utils.py
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF utils for computing information over given data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# GOOGLE-INITIALIZATION
import tensorflow as tf
# TODO(https://issues.apache.org/jira/browse/SPARK-22674): Switch to
# `collections.namedtuple` or `typing.NamedTuple` once the Spark issue is
# resolved.
from tfx_bsl.types import tfx_namedtuple
from tensorflow.python.framework import composite_tensor # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.util import object_identity # pylint: disable=g-direct-tensorflow-import
# Sentinel value for undefined floating-point results.
_FLOATING_NAN = float('nan')
# Global sentinels used to keep track of the total counts of y
GLOBAL_Y_COUNT_SENTINEL_STRING = b'global_y_count_sentinel'
GLOBAL_Y_COUNT_SENTINEL_INT = tf.int64.limits[1]
# Result of a batch-wise reduction over (x, y, weights); fields other than
# unique_x may be None depending on which reduction produced the tuple.
ReducedBatchWeightedCounts = tfx_namedtuple.namedtuple('ReducedBatchCounts', [
    'unique_x', 'summed_weights_per_x', 'summed_positive_per_x_and_y',
    'counts_per_x'
])
# Hashable stand-in for a CompositeTensor: its TypeSpec plus hashable refs
# to its flattened components (see hashable_tensor_or_op / deref_tensor_or_op).
_CompositeTensorRef = tfx_namedtuple.namedtuple('_CompositeTensorRef',
                                                ['type_spec', 'list_of_refs'])
def copy_tensors(tensors):
  """Makes deep copies of a dict of tensors.

  Makes deep copies (using tf.identity or its equivalent for
  `CompositeTensor`s) of the values of `tensors`.

  Args:
    tensors: A dict whose keys are strings and values are `Tensors`s or
      `CompositeTensor`s.

  Returns:
    A copy of `tensors` with values replaced by tf.identity applied to the
    value, or the equivalent for `CompositeTensor`s.
  """
  copied = {}
  for name, tensor in tensors.items():
    copied[name] = _copy_tensor_or_composite_tensor(tensor)
  return copied
def _copy_tensor(tensor):
  """Returns tf.identity of `tensor`, named '<original op name>_copy'."""
  copy_name = '{}_copy'.format(tensor.op.name)
  return tf.identity(tensor, name=copy_name)
def _copy_tensor_or_composite_tensor(tensor):
  """Copies a `Tensor`, or every component of a `CompositeTensor`."""
  if not isinstance(tensor, composite_tensor.CompositeTensor):
    return _copy_tensor(tensor)
  return tf.nest.map_structure(_copy_tensor, tensor, expand_composites=True)
def reduce_batch_weighted_counts(x, weights=None):
  """Performs batch-wise reduction to produce (possibly weighted) counts.

  Args:
    x: Input `Tensor`.
    weights: (Optional) Weights input `Tensor`.

  Returns:
    a named tuple of...
      The unique values in x
      The sum of the weights for each unique value in x if weights are
      provided, else None
  """
  if isinstance(x, tf.SparseTensor):
    x = x.values
  if weights is None:
    # TODO(b/112916494): Always do batch wise reduction once possible.
    return ReducedBatchWeightedCounts(tf.reshape(x, [-1]), None, None, None)
  # TODO(b/134075780): Revisit expected weights shape when input is sparse.
  x, weights = assert_same_shape(x, weights)
  flat_x = tf.reshape(x, [-1])
  flat_weights = tf.reshape(weights, [-1])
  unique_values, value_idx, _ = tf.unique_with_counts(flat_x, out_idx=tf.int64)
  # Sum the weight of every occurrence of each unique value.
  weight_sums = tf.math.unsorted_segment_sum(
      flat_weights, value_idx, tf.size(input=unique_values))
  return ReducedBatchWeightedCounts(unique_values, weight_sums, None, None)
def reduce_batch_weighted_cooccurrences(x_input,
                                        y_input,
                                        weights_input=None,
                                        extend_with_sentinel_counts=True):
  """Performs batch-wise reduction to produce weighted co-occurrences.

  Computes the weighted co-occurrence of each feature value in x, for each value
  in the range [0, max(y)). If extend_with_sentinel_counts is true, the return
  value will include an additional sentinel token (not in the true vocabulary)
  that is used to accumulate the global distribution of y values.

  Args:
    x_input: Input `Tensor` or `SparseTensor`.
    y_input: Integer `Tensor` or `SparseTensor` with which to compute the
      co-occurrence with x_input.
    weights_input: (Optional) Weights input `Tensor`.
    extend_with_sentinel_counts: If True, the reduced batch will be extended
      with a sentinel value that accumulates the total distribution of y
      values. Should be True except when called recursively with the sentinel
      value as input.

  Returns:
    a namedtuple of...
      unique_x_values: the unique values in x
      summed_weights_per_x: sum of the weights for each unique value in x
      summed_positive_per_x_and_y: If tensor y is provided, the sum of
        positive weights for each unique y value, for each unique value in x.
        If y tensor is not provided, value is None.
      counts_per_x: if y is provided, counts of each of the unique values in x,
        otherwise, None.
  """
  tf.compat.v1.assert_type(y_input, tf.int64)
  # TODO(b/134075780): Revisit expected weights shape when input is sparse.
  if isinstance(x_input, tf.SparseTensor):
    batch_indices = x_input.indices[:, 0]
    # y and densified x should have the same batch dimension.
    assert_eq = tf.compat.v1.assert_equal(
        tf.shape(y_input)[0], tf.cast(x_input.dense_shape[0], tf.int32))
    with tf.control_dependencies([assert_eq]):
      # Align y with x's values by gathering one y per sparse x entry.
      y = tf.gather(y_input, batch_indices)
    x = x_input.values
  else:
    y = y_input
    x = x_input
  if weights_input is None:
    weights = tf.ones_like(x, dtype=tf.float32)
  else:
    x, weights_input = assert_same_shape(x, weights_input)
    weights = weights_input
  y = _broadcast_to_x_shape(x, y)
  x, y = assert_same_shape(x, y)
  # Flatten everything so a single segmented sum covers the whole batch.
  x = tf.reshape(x, [-1])
  y = tf.reshape(y, [-1])
  weights = tf.reshape(weights, [-1])
  unique_x_values, unique_idx, unique_count = tf.unique_with_counts(
      x, out_idx=tf.int64)
  summed_weights_per_x = tf.math.unsorted_segment_sum(
      weights, unique_idx, tf.size(input=unique_x_values))
  # For each feature value in x, compute the weighted positive sum for each
  # unique value in y.
  max_y_value = tf.cast(tf.reduce_max(input_tensor=y_input), tf.int64)
  max_x_idx = tf.cast(tf.size(unique_x_values), tf.int64)
  # Encode the (x index, y value) pair as one segment id so a single
  # unsorted_segment_sum accumulates the whole co-occurrence matrix.
  dummy_index = (max_y_value + 1) * unique_idx + y
  summed_positive_per_x_and_y = tf.cast(
      tf.math.unsorted_segment_sum(weights, dummy_index,
                                   max_x_idx * (max_y_value + 1)),
      dtype=tf.float32)
  # Reshape the flat accumulator into [num unique x, num y values].
  summed_positive_per_x_and_y = tf.reshape(summed_positive_per_x_and_y,
                                           [max_x_idx, max_y_value + 1])
  reduced_batch = ReducedBatchWeightedCounts(
      unique_x=unique_x_values,
      summed_weights_per_x=summed_weights_per_x,
      summed_positive_per_x_and_y=summed_positive_per_x_and_y,
      counts_per_x=unique_count)
  # Add a sentinel token tracking the full distribution of y values.
  if extend_with_sentinel_counts:
    reduced_batch = extend_reduced_batch_with_y_counts(reduced_batch, y_input,
                                                       weights_input)
  return reduced_batch
def extend_reduced_batch_with_y_counts(reduced_batch, y, weights=None):
  """Extend the ReducedBatchWeightedCounts with global counts for y.

  This is used to maintain an accurate count of global frequencies of each
  value in y. When x is multivalent, the sum over summed_positive_per_x_and_y
  will over-count the occurrence of y. To keep track of the true distribution
  of y values, we add a sentinel value that tracks the global counts of each
  distinct value in y. This is useful for computing the mutual information
  between values in x and y.

  Args:
    reduced_batch: A ReducedBatchWeightedCounts instance.
    y: A `Tensor` representing a batch of y values.
    weights: Optional `Tensor` representing a batch of weight values.

  Returns:
    A new ReducedBatchWeightedCounts instance with sentinel values appended.
  """
  # Create a dummy sentinel token that is present in every record.
  if reduced_batch.unique_x.dtype.is_integer:
    sentinel_values = tf.cast(
        tf.fill(tf.shape(y), GLOBAL_Y_COUNT_SENTINEL_INT), tf.int64)
  else:
    sentinel_values = tf.fill(tf.shape(y), GLOBAL_Y_COUNT_SENTINEL_STRING)
  # Reducing over the constant sentinel column collapses to one unique_x row
  # whose summed_positive_per_x_and_y holds the total summed positive per
  # value in y.
  sentinel_batch = reduce_batch_weighted_cooccurrences(
      sentinel_values, y, weights, extend_with_sentinel_counts=False)

  def _append_sentinel(field_name):
    """Concatenates the sentinel row onto one field of the reduced batch."""
    return tf.concat([getattr(reduced_batch, field_name),
                      getattr(sentinel_batch, field_name)], axis=0)

  return ReducedBatchWeightedCounts(
      unique_x=_append_sentinel('unique_x'),
      summed_weights_per_x=_append_sentinel('summed_weights_per_x'),
      summed_positive_per_x_and_y=_append_sentinel(
          'summed_positive_per_x_and_y'),
      counts_per_x=_append_sentinel('counts_per_x'))
def hashable_tensor_or_op(tensor_or_op):
  """Returns a hashable reference to a Tensor if given a Tensor/CompositeTensor.

  Use deref_tensor_or_op on the result to get the Tensor (or SparseTensor).

  Args:
    tensor_or_op: A `tf.Tensor`, `tf.CompositeTensor`, or other type.

  Returns:
    A hashable representation for the Tensor or CompositeTensor, or the
    original value for other types.
  """
  if isinstance(tensor_or_op, tf.Tensor):
    return tensor_or_op.experimental_ref()
  if isinstance(tensor_or_op, composite_tensor.CompositeTensor):
    # TODO(b/156759471): Use tf.type_spec_from_value here.
    # Recursively convert every flattened component to its hashable form.
    components = tf.nest.flatten(tensor_or_op, expand_composites=True)
    refs = tuple(hashable_tensor_or_op(component) for component in components)
    return _CompositeTensorRef(
        type_spec=tensor_or_op._type_spec,  # pylint: disable=protected-access
        list_of_refs=refs)
  return tensor_or_op
def deref_tensor_or_op(tensor_or_op):
  """Returns a Tensor or CompositeTensor if given a reference, otherwise input.

  Args:
    tensor_or_op: An output of `hashable_tensor_or_op`.

  Returns:
    A Tensor, CompositeTensor, or the given tensor_or_op.
  """
  if isinstance(tensor_or_op, object_identity.Reference):
    return tensor_or_op.deref()
  if isinstance(tensor_or_op, _CompositeTensorRef):
    # Dereference every component, then reassemble using the stored spec.
    flat_components = [
        deref_tensor_or_op(ref) for ref in tensor_or_op.list_of_refs
    ]
    return tf.nest.pack_sequence_as(
        structure=tensor_or_op.type_spec,
        flat_sequence=flat_components,
        expand_composites=True)
  return tensor_or_op
def _broadcast_to_x_shape(x, y):
  """Broadcasts y to same shape as x as needed.

  Args:
    x: An input feature.
    y: A feature that is either the same shape as x or has the same outer
      dimensions as x. If the latter, y is broadcast to the same shape as x.

  Returns:
    A Tensor that contains the broadcasted feature, y.
  """
  # The batch dimension of x and y must be the same, and y must be 1D.
  x_shape = tf.shape(input=x)
  y_shape = tf.shape(input=y)
  with tf.control_dependencies(
      [tf.compat.v1.assert_equal(x_shape[0], y_shape[0])]):
    y = tf.identity(y)
  # Pad y's shape with trailing ones until it has the same rank as x, then
  # broadcast the reshaped tensor up to x's full shape.
  rank_delta = tf.rank(x) - tf.rank(y)
  padded_shape = tf.concat(
      [tf.shape(y), tf.ones(rank_delta, dtype=tf.int32)], axis=0)
  return tf.broadcast_to(tf.reshape(y, padded_shape), x_shape)
def assert_same_shape(x, y):
  """Asserts two tensors have the same dynamic and static shape.

  Args:
    x: A `Tensor`.
    y: A `Tensor`

  Returns:
    The elements `x` and `y`, the results must be used in order to ensure that
    the dynamic check is executed.
  """
  # Static check first: fails fast at graph-construction time.
  x.shape.assert_is_compatible_with(y.shape)
  # Dynamic check: the assertion op must run before the identities below.
  dynamic_check = tf.compat.v1.assert_equal(
      tf.shape(input=x), tf.shape(input=y))
  with tf.control_dependencies([dynamic_check]):
    return tf.identity(x), tf.identity(y)
def reduce_batch_count(x, reduce_instance_dims):
"""Counts elements in the given tensor.
Args:
x: A `Tensor` or `SparseTensor`.
reduce_instance_dims: A bool, if True - collapses the batch and instance
dimensions to arrive at a single scalar output. Otherwise, only
collapses the batch dimension and outputs a `Tensor` of the same shape
as the input.
Returns:
The element count of `x`. The result is | |
import random
import pytest
from faker import Faker
from flask import url_for
from .models import Location, Address, Area, Country, AreaSchema, LocationSchema, AddressSchema
from .. import db
from ..images.create_image_data import create_images_locations, create_test_images
from ..images.models import Image, ImageLocation
# Shared schema instances, reused by the factories and nested-creation
# helper below to validate generated payloads before committing them.
area_schema = AreaSchema()
location_schema = LocationSchema()
address_schema = AddressSchema()
@pytest.mark.smoke
@pytest.mark.parametrize('code, name', [('US', 'United States'),
                                        ('EC', 'Ecuador'),
                                        ('TH', 'Thailand')])
def test_read_country(auth_client, code, name):
    """Fetching a single country by code returns its localized name."""
    loaded = Country.load_from_file()
    assert loaded > 0
    resp = auth_client.get(url_for('places.read_countries', country_code=code, locale='en-US'))
    assert resp.status_code == 200
    print("RESP", resp.json)
    assert resp.json['name'] == name
@pytest.mark.slow
def test_read_all_countries(auth_client):
    """The country list endpoint returns one entry per loaded country."""
    loaded = Country.load_from_file()
    assert loaded > 0
    resp = auth_client.get(url_for('places.read_countries', locale='en-US'))
    assert resp.status_code == 200
    assert len(resp.json) == loaded
@pytest.mark.smoke
def test_missing_locale(auth_client):
    """Country requests without a locale are rejected with 400."""
    for query_args in ({}, {'country_code': 'US'}):
        resp = auth_client.get(url_for('places.read_countries', **query_args))
        assert resp.status_code == 400
class RandomLocaleFaker:
    """Pool of Faker instances, one per locale, sampled uniformly."""

    def __init__(self, *locales):
        # Build one Faker per requested locale.
        self.fakers = [Faker(locale) for locale in locales]

    def __call__(self):
        """Pick one of the pooled fakers at random."""
        return random.choice(self.fakers)
# Shared faker instances for the factories below.
rl_fake = RandomLocaleFaker('en_US', 'es_MX')
fake = Faker()  # Generic faker; random-locale ones don't implement everything.
def flip():
    """Return True or False at random (fair coin toss)."""
    coin = (True, False)
    return random.choice(coin)
def area_factory(sqla):
    """Create a fake area payload (dict) suitable for AreaSchema.

    Seeds the country table on first use so a country code is always
    available.
    """
    countries = sqla.query(Country).all()
    if not countries:
        # No countries yet; load the reference data and re-query.
        Country.load_from_file()
        countries = sqla.query(Country).all()
    area = {
        # using last_name for testing purposes, will be area name
        'name': rl_fake().last_name(),
        'country_code': random.choice(countries).code
    }
    return area
    # Bug fix: removed an unreachable `areas = sqla.query(Area).all()`
    # statement that followed the return.
def address_factory(sqla):
    """Create a fake address payload (dict) suitable for AddressSchema."""
    generic_fake = Faker()  # Use a generic one; others may not have all methods.
    lines = generic_fake.address().splitlines()
    areas = sqla.query(Area).all()
    if not areas:
        # No areas exist yet; seed a few and re-query.
        create_multiple_areas(sqla, random.randint(3, 6))
        areas = sqla.query(Area).all()
    chosen_area = random.choice(areas)
    return {
        'name': generic_fake.name(),
        'address': lines[0],
        'city': lines[1].split(",")[0],
        'area_id': chosen_area.id,
        'country_code': chosen_area.country_code,
        'latitude': random.random() * 0.064116 + -2.933783,
        'longitude': random.random() * 0.09952 + -79.055411
    }
def location_factory(sqla):
    """Create a fake location payload (dict) suitable for LocationSchema."""
    generic_fake = Faker()  # Use a generic one; others may not have all methods.
    addresses = sqla.query(Address).all()
    if not addresses:
        # No addresses exist yet; seed a few and re-query.
        create_multiple_addresses(sqla, random.randint(3, 6))
        addresses = sqla.query(Address).all()
    chosen_address = random.choice(addresses)
    return {
        'description': generic_fake.name(),
        'address_id': chosen_address.id
    }
def create_multiple_areas(sqla, n):
    """Commit `n` new areas to the database. Return their IDs."""
    schema = AreaSchema()
    new_areas = []
    for _ in range(n):
        valid_area = schema.load(area_factory(sqla))
        new_areas.append(Area(**valid_area))
    sqla.add_all(new_areas)
    sqla.commit()
    # Bug fix: the docstring promised the new IDs but nothing was returned.
    return [area.id for area in new_areas]
def create_multiple_addresses(sqla, n):
    """Commit `n` new addresses to the database. Return their IDs."""
    schema = AddressSchema()
    new_addresses = []
    for _ in range(n):
        valid_address = schema.load(address_factory(sqla))
        new_addresses.append(Address(**valid_address))
    sqla.add_all(new_addresses)
    sqla.commit()
    # Bug fix: the docstring promised the new IDs but nothing was returned;
    # the accumulator list was also renamed from the misleading singular
    # `new_address`.
    return [address.id for address in new_addresses]
def create_multiple_locations(sqla, n):
    """Commit `n` new locations to the database. Return their IDs."""
    schema = LocationSchema()
    new_locations = []
    for _ in range(n):
        valid_location = schema.load(location_factory(sqla))
        new_locations.append(Location(**valid_location))
    sqla.add_all(new_locations)
    sqla.commit()
    # Bug fix: the docstring promised the new IDs but nothing was returned.
    return [location.id for location in new_locations]
# the addresses won't have latitude and longitude
def create_location_nested(sqla, address, address_name, description, country_code='EC', area_name='Azuay',
                           city='Cuenca'):
    """Create and commit one Location, reusing or creating its Country/Area/Address chain.

    Existing Country, Area, and Address rows are linked when a match is
    found; otherwise Area/Address rows are created and flushed to obtain
    ids. On a missing/unknown country code or missing area name, a message
    is printed and the function returns early, before the final commit.
    """
    # {
    #     ------- Country related
    #     'country_code': 'US', # required for nesting
    #     ------- Area related
    #     'area_name': 'area name', # required for nesting
    #     ------- Address related
    #     'city': 'Upland', # required if address doesn't exist in database
    #     'address': '236 W. Reade Ave.', # required if address doesn't exist in database
    #     'address_name': 'Taylor University', # required if address doesn't exist in database
    #     ------- Location related
    #     'description': 'Euler 217' # optional
    # }
    # This method tries to link existing entries in Country, Area, Address table if possible, otherwise create
    # When there is at least a certain table related field in the payload, the foreign key specified in the payload for that table will be overridden by the fields given
    def debugPrint(msg):
        # Tracing helper; all progress messages go through here.
        print(msg)
    # NOTE(review): `resolving_keys` is never read below.
    resolving_keys = ('country_code', 'area_name', 'city', 'address', 'address_name')
    # Snapshot of the function arguments (plus the locals defined above)
    # used as the request-like payload; taking locals() here is
    # order-sensitive, so nothing may be defined between the arguments and
    # this line without appearing in the payload.
    payload_data = locals()
    # process country information
    resolve_needed = True  # always True in this helper; kept for symmetry
    location_payload = {}
    if resolve_needed:
        debugPrint("starting to resolve")
        debugPrint(payload_data)
        # resolve country: must already exist (loaded from file elsewhere)
        if 'country_code' not in payload_data:
            print("'country_code not specified in request body', 422")
            return
        country = sqla.query(Country).filter_by(code=payload_data['country_code']).first()
        if not country:
            print(f"no country code found in database matching {payload_data['country_code']}")
            return
        country_code = country.code
        debugPrint(f"Country code resolved: {country_code}")
        # resolve area: reuse an existing (country_code, name) match or create
        if 'area_name' not in payload_data:
            print("'area_name not specified in request body', 422")
            return
        area = sqla.query(Area).filter_by(country_code=country_code, name=payload_data['area_name']).first()
        area_id = None
        if area:
            area_id = area.id
            debugPrint(f"fetched existing area_id {area_id}")
        else:
            debugPrint(f"creating new area")
            area_payload = {
                'name': payload_data['area_name'],
                'country_code': country_code
            }
            valid_area = area_schema.load(area_payload)
            area = Area(**valid_area)
            sqla.add(area)
            # flush (not commit) so the generated id becomes available
            sqla.flush()
            area_id = area.id
            debugPrint(f"new_area created with id {area_id}")
        # resolve address: rename 'address_name' -> 'name' to match the schema
        address_name_transform = {'address_name': 'name'}
        address_keys = ('city', 'address', 'address_name')
        address_payload = {k if k not in address_name_transform else address_name_transform[k]: v
                           for k, v in payload_data.items() if k in address_keys}
        address_payload['area_id'] = area_id
        address_payload['country_code'] = country_code
        address = sqla.query(Address).filter_by(**address_payload).first()
        address_id = None
        if address:
            address_id = address.id
            debugPrint(f"fetched existing address id {address_id}")
        else:
            debugPrint(f"creating new address")
            debugPrint(f"address payload {address_payload}")
            valid_address = address_schema.load(address_payload)
            address = Address(**valid_address)
            sqla.add(address)
            # flush (not commit) so the generated id becomes available
            sqla.flush()
            address_id = address.id
            debugPrint(f"new_address created with id {address_id}")
        # setting the request for location with the address_id obtained
        location_payload['address_id'] = address_id
        location_payload['description'] = description
    else:
        debugPrint("no need to resolve")
    debugPrint(f"final request for location: {location_payload} ")
    valid_location = location_schema.load(location_payload)
    new_location = Location(**valid_location)
    sqla.add(new_location)
    sqla.commit()
def prep_database(sqla):
    """Seed the database with random areas, addresses, and locations.

    Returns:
        list: IDs of all Area rows after seeding.

    (Docstring fix: the previous docstring was copy-pasted from a people/
    accounts helper and did not describe what this function does.)
    """
    create_multiple_areas(sqla, random.randint(5, 15))
    create_multiple_addresses(sqla, random.randint(5, 15))
    create_multiple_locations(sqla, random.randint(5, 15))
    return [area.id for area in sqla.query(Area.id).all()]
# ---- Area
@pytest.mark.smoke
def test_create_area(auth_client):
    """POSTing valid area payloads creates one row per request."""
    # GIVEN an empty database with countries loaded
    Country.load_from_file()
    how_many = random.randint(3, 6)
    # WHEN several well-formed areas are posted
    for _ in range(how_many):
        payload = area_factory(auth_client.sqla)
        resp = auth_client.post(url_for('places.create_area'), json=payload)
        # THEN each create succeeds
        assert resp.status_code == 201
    # THEN one row exists per posted area
    assert auth_client.sqla.query(Area).count() == how_many
@pytest.mark.smoke
def test_create_area_invalid(auth_client):
    """POSTing malformed area payloads is rejected and creates nothing."""
    # GIVEN an empty database with countries loaded
    Country.load_from_file()
    how_many = random.randint(3, 6)
    for _ in range(how_many):
        # GIVEN a payload broken in at least one way
        payload = area_factory(auth_client.sqla)
        if flip():
            payload['name'] = None
        if flip():
            payload['country_code'] = None
        if not (payload['name'] is None or payload['country_code'] is None):
            # Neither field got nulled; corrupt with an unexpected field.
            payload[fake.word()] = fake.word()
        # WHEN the bad payload is posted
        resp = auth_client.post(url_for('places.create_area'), json=payload)
        # THEN the request is unprocessable
        assert resp.status_code == 422
    # THEN no rows were created
    assert auth_client.sqla.query(Area).count() == 0
@pytest.mark.smoke
def test_read_area(auth_client):
    """Each created area can be fetched individually by id."""
    # GIVEN a collection of areas
    Country.load_from_file()
    count = random.randint(3, 6)
    create_multiple_areas(auth_client.sqla, count)
    areas = auth_client.sqla.query(Area).all()
    # THEN we expect the same number
    # Consistency fix: query through auth_client.sqla like every sibling
    # test, instead of reaching for the global db.session.
    assert auth_client.sqla.query(Area).count() == count
    # WHEN we request each of them from the server
    for area in areas:
        resp = auth_client.get(url_for('places.read_one_area', area_id=area.id))
        # THEN we find a matching area
        assert resp.status_code == 200
        assert resp.json['name'] == area.name
        assert resp.json['country_code'] == area.country_code
@pytest.mark.smoke
def test_read_all_areas(auth_client):
    """The area list endpoint returns every stored area."""
    # GIVEN a collection of areas
    Country.load_from_file()
    how_many = random.randint(3, 6)
    create_multiple_areas(auth_client.sqla, how_many)
    assert how_many > 0
    # WHEN all areas are requested from the server
    resp = auth_client.get(url_for('places.read_all_areas', locale='en-US'))
    # THEN the count matches the number of entries in the database
    assert resp.status_code == 200
    assert len(resp.json) == how_many
@pytest.mark.smoke
def test_replace_area(auth_client):
    """PUTting a fresh payload replaces every field of an area, keeping its id."""
    # GIVEN a set of areas
    Country.load_from_file()
    how_many = random.randint(3, 6)
    create_multiple_areas(auth_client.sqla, how_many)
    areas = auth_client.sqla.query(Area).all()
    # WHEN each area is replaced with a freshly generated one
    for area in areas:
        replacement = area_factory(auth_client.sqla)
        resp = auth_client.put(url_for('places.replace_area', area_id=area.id), json=replacement)
        # THEN the request succeeds and the id is preserved
        assert resp.status_code == 200
        assert resp.json['id'] == area.id
        # THEN each field changed exactly when the replacement differed
        for field in ('name', 'country_code'):
            if replacement[field] != getattr(area, field):
                assert resp.json[field] != getattr(area, field)
            else:
                assert resp.json[field] == getattr(area, field)
@pytest.mark.smoke
def test_replace_area_invalid(auth_client):
    """PUTting malformed replacement payloads is rejected as unprocessable."""
    # GIVEN a set of areas
    Country.load_from_file()
    how_many = random.randint(3, 6)
    create_multiple_areas(auth_client.sqla, how_many)
    areas = auth_client.sqla.query(Area).all()
    for area in areas:
        # GIVEN a replacement payload broken in at least one way
        bad_payload = area_factory(auth_client.sqla)
        if flip():
            bad_payload['name'] = None
        if flip():
            bad_payload['country_code'] = None
        if not (bad_payload['name'] is None or bad_payload['country_code'] is None):
            # Neither field got nulled; corrupt with an unexpected field.
            bad_payload[fake.word()] = fake.word()
        # WHEN the bad replacement is submitted
        resp = auth_client.put(url_for('places.replace_area', area_id=area.id), json=bad_payload)
        # THEN the request is unprocessable
        assert resp.status_code == 422
@pytest.mark.smoke
def test_update_area(auth_client):
# GIVEN a set of areas
Country.load_from_file()
count = random.randint(3, 6)
create_multiple_areas(auth_client.sqla, count)
areas = auth_client.sqla.query(Area).all()
# GIVEN good modifcation data
| |
import * # NOQA
>>> scores, labels = testdata_scores_labels()
>>> self = ConfusionMetrics().fit(scores, labels)
>>> xdata = self.tpr
>>> ydata = self.thresholds
>>> pt = 1.0
>>> #xdata = self.fpr
>>> #ydata = self.thresholds
>>> #pt = 0.0
>>> thresh = interpolate_replbounds(xdata, ydata, pt, maximize=True)
>>> print('thresh = %r' % (thresh,))
>>> thresh = interpolate_replbounds(xdata, ydata, pt, maximize=False)
>>> print('thresh = %r' % (thresh,))
Example:
>>> # DISABLE_DOCTEST
>>> from vtool.confusion import * # NOQA
>>> xdata = np.array([0.7, 0.8, 0.8, 0.9, 0.9, 0.9])
>>> ydata = np.array([34, 26, 23, 22, 19, 17])
>>> pt = np.array([.85, 1.0, -1.0])
>>> interp_vals = interpolate_replbounds(xdata, ydata, pt)
>>> result = ('interp_vals = %s' % (str(interp_vals),))
>>> print(result)
interp_vals = [ 22.5 17. 34. ]
"""
if not ut.issorted(xdata):
if ut.issorted(xdata[::-1]):
xdata = xdata[::-1]
ydata = ydata[::-1]
else:
raise AssertionError('need to sort xdata and ydata in function')
sortx = np.lexsort(np.vstack([np.arange(len(xdata)), xdata]))
xdata = xdata.take(sortx, axis=0)
ydata = ydata.take(sortx, axis=0)
is_scalar = not ub.iterable(pt)
# print('----')
# print('xdata = %r' % (xdata,))
# print('ydata = %r' % (ydata,))
if is_scalar:
pt = np.array([pt])
minval = xdata.min()
maxval = xdata.max()
argx_min_list = np.argwhere(xdata == minval)
argx_max_list = np.argwhere(xdata == maxval)
argx_min = argx_min_list.min()
argx_max = argx_max_list.max()
lower_mask = pt < xdata[argx_min]
upper_mask = pt > xdata[argx_max]
interp_mask = ~np.logical_or(lower_mask, upper_mask)
# if isinstance(pt, np.ndarray):
dtype = np.result_type(np.float32, ydata.dtype)
interp_vals = np.empty(pt.shape, dtype=dtype)
interp_vals[lower_mask] = ydata[argx_min]
interp_vals[upper_mask] = ydata[argx_max]
# TODO: fix duplicate values depending on if higher or lower numbers are
# desirable
if True:
# Grouping should be ok because xdata should be sorted
# therefore groupxs are consecutive
import vtool as vt
unique_vals, groupxs = vt.group_indices(xdata)
grouped_ydata = vt.apply_grouping(ydata, groupxs)
if maximize:
sub_idxs = [idxs[np.argmax(ys)] for idxs, ys in zip(groupxs, grouped_ydata)]
else:
sub_idxs = [idxs[np.argmin(ys)] for idxs, ys in zip(groupxs, grouped_ydata)]
sub_idxs = np.array(sub_idxs)
xdata = xdata[sub_idxs]
ydata = ydata[sub_idxs]
if np.any(interp_mask):
# FIXME: allow assume_sorted = False
func = scipy.interpolate.interp1d(xdata, ydata, kind='linear', assume_sorted=True)
interp_vals[interp_mask] = func(pt[interp_mask])
if is_scalar:
interp_vals = interp_vals[0]
# interpolate to target recall
# right_index = indicies[0]
# right_recall = self.recall[right_index]
# left_index = right_index - 1
# left_recall = self.recall[left_index]
# stepsize = right_recall - left_recall
# alpha = (target_recall - left_recall) / stepsize
# left_fpr = self.fpr[left_index]
# right_fpr = self.fpr[right_index]
# interp_fpp = (left_fpr * (1 - alpha)) + (right_fpr * (alpha))
return interp_vals
def interpolate_precision_recall(precision, recall, nSamples=11):
    """
    Interpolates precision as a function of recall p_{interp}(r)

    Reduce wiggles in average precision curve by taking interpolated values
    along a uniform sample.

    Args:
        precision (ndarray): precision at each operating point, or None
        recall (ndarray): recall at each operating point
        nSamples (int): number of uniform recall samples in [0, 1]

    Returns:
        tuple: (recall_domain, p_interp) arrays of length nSamples, or
            (None, None) when precision is None.

    References:
        http://en.wikipedia.org/wiki/Information_retrieval#Average_precision
        http://en.wikipedia.org/wiki/Information_retrieval#Mean_Average_precision

    CommandLine:
        python -m vtool.confusion --test-interpolate_precision_recall --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> from vtool.confusion import * # NOQA
        >>> scores, labels = testdata_scores_labels()
        >>> nSamples = 11
        >>> confusions = ConfusionMetrics().fit(scores, labels)
        >>> precision = confusions.precision
        >>> recall = confusions.recall
        >>> recall_domain, p_interp = interpolate_precision_recall(confusions.precision, recall, nSamples=11)
        >>> result = ub.repr2(p_interp, precision=1, with_dtype=True)
        >>> print(result)
        >>> # xdoctest: +REQUIRES(--show)
        >>> draw_precision_recall_curve(recall_domain, p_interp)
        >>> ut.show_if_requested()
        np.array([ 1. , 1. , 1. , 1. , 1. , 1. , 1. , 0.9, 0.9, 0.8, 0.6], dtype=np.float64)
    """
    if precision is None:
        return None, None
    recall_domain = np.linspace(0, 1, nSamples)

    # Pascal-style interpolation: p(r) is the best precision achievable at
    # any recall >= r, or 0 when no operating point reaches r.
    # (Cleanup: removed a dead `if False:` scipy.interpolate branch and
    # commented-out alternatives; the helper no longer shadows the
    # `p_interp` result name.)
    def _interp_at(r):
        candidates = precision[recall >= r]
        if len(candidates) == 0:
            return 0
        return candidates.max()

    p_interp = np.array([_interp_at(r) for r in recall_domain])
    return recall_domain, p_interp
def interact_roc_factory(confusions, target_tpr=None, show_operating_point=False):
    r"""
    Build an interactive ROC-curve viewer class bound to `confusions`.

    Args:
        confusions (Confusions): fitted confusion metrics to plot
        target_tpr (None): initial tpr operating point to highlight (default = None)
        show_operating_point (bool): annotate the chosen point (default = False)

    Returns:
        ROCInteraction: an AbstractInteraction subclass closed over the
            given `confusions`; clicking inside the axes re-plots using the
            clicked x position as the target fpr.

    CommandLine:
        python -m vtool.confusion --exec-interact_roc_factory --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from vtool.confusion import * # NOQA
        >>> scores, labels = testdata_scores_labels()
        >>> print('scores = %r' % (scores,))
        >>> confusions = ConfusionMetrics().fit(scores, labels)
        >>> print(ut.make_csv_table(
        >>>     [confusions.fpr, confusions.tpr, confusions.thresholds],
        >>>     ['fpr', 'tpr', 'thresh']))
        >>> # xdoctest: +REQUIRES(--show)
        >>> ROCInteraction = interact_roc_factory(confusions, target_tpr=.4, show_operating_point=True)
        >>> inter = ROCInteraction()
        >>> inter.show_page()
        >>> # xdoctest: +REQUIRES(--show)
        >>> import wbia.plottool as pt
        >>> ut.show_if_requested()
    """
    from wbia.plottool.abstract_interaction import AbstractInteraction

    class ROCInteraction(AbstractInteraction):
        """
        References:
            http://scipy-central.org/item/38/1/roc-curve-demo

        Notes:
            Sensitivity = true positive rate
            Specificity = true negative rate
        """

        def __init__(self, **kwargs):
            print('ROC Interact')
            super(ROCInteraction, self).__init__(**kwargs)
            self.confusions = confusions
            # target_fpr is updated by clicking inside the plot axes.
            self.target_fpr = None
            self.show_operating_point = show_operating_point

        @staticmethod
        def static_plot(fnum, pnum, **kwargs):
            # print('ROC Interact2')
            # Fall back to the factory-level defaults for unspecified kwargs.
            kwargs['thresholds'] = kwargs.get('thresholds', confusions.thresholds)
            kwargs['show_operating_point'] = kwargs.get(
                'show_operating_point', show_operating_point
            )
            confusions.draw_roc_curve(
                fnum=fnum, pnum=pnum, target_tpr=target_tpr, **kwargs
            )

        def plot(self, fnum, pnum):
            # print('ROC Interact3')
            self.static_plot(
                fnum,
                pnum,
                target_fpr=self.target_fpr,
                show_operating_point=self.show_operating_point,
            )

        def on_click_inside(self, event, ex):
            # Re-target the highlighted operating point to the clicked fpr.
            self.target_fpr = event.xdata
            self.show_page()
            self.draw()

        def on_drag(self, event):
            # FIXME: blit
            if False:  # drag-to-retarget is disabled; kept for reference
                # print('Dragging ' + str(event.x) + ' ' + str(event.y))
                self.target_fpr = event.xdata
                self.show_page()
                # self.draw()
                if event.inaxes is not None:
                    self.fig.canvas.blit(event.inaxes.bbox)
                # [blit(ax) event.canvas.figure.axes]

    return ROCInteraction
def draw_roc_curve(
    fpr,
    tpr,
    fnum=None,
    pnum=None,
    marker='',
    target_tpr=None,
    target_fpr=None,
    thresholds=None,
    color=None,
    name=None,
    label=None,
    show_operating_point=False,
):
    r"""
    Draw an ROC curve, optionally highlighting one operating point.

    Args:
        fpr (ndarray): false positive rate per operating point
        tpr (ndarray): true positive rate per operating point
        fnum (int): figure number(default = None)
        pnum (tuple): plot number(default = None)
        marker (str): (default = '')
        target_tpr (None): desired tpr; the matching fpr is interpolated (default = None)
        target_fpr (None): desired fpr; the matching tpr is interpolated (default = None)
        thresholds (None): score threshold per operating point (default = None)
        color (None): (default = None)
        name (None): curve name shown in the title (default = None)
        label (None): legend label; the AUC is appended to it (default = None)
        show_operating_point (bool): append fpr/tpr/thresh to the title (default = False)

    CommandLine:
        python -m vtool.confusion --exec-draw_roc_curve --show --lightbg

    Example:
        >>> # DISABLE_DOCTEST
        >>> from vtool.confusion import * # NOQA
        >>> scores, labels = testdata_scores_labels()
        >>> confusions = ConfusionMetrics().fit(scores, labels)
        >>> fpr = confusions.fpr
        >>> tpr = confusions.tpr
        >>> thresholds = confusions.thresholds
        >>> fnum = None
        >>> pnum = None
        >>> marker = 'x'
        >>> target_tpr = .85
        >>> target_fpr = None
        >>> color = None
        >>> show_operating_point = True
        >>> draw_roc_curve(fpr, tpr, fnum, pnum, marker, target_tpr, target_fpr,
        >>>                thresholds, color, show_operating_point)
        >>> ut.show_if_requested()
    """
    import wbia.plottool as pt
    import sklearn.metrics

    if fnum is None:
        fnum = pt.next_fnum()
    # if color is None:
    #     color = (0.4, 1.0, 0.4) if pt.is_default_dark_bg() else (0.1, 0.4, 0.4)
    roc_auc = sklearn.metrics.auc(fpr, tpr)
    title_suffix = ''
    # Pick the operating point: interpolate whichever axis was not targeted.
    if target_fpr is not None:
        # func = scipy.interpolate.interp1d(fpr, tpr, kind='linear', assume_sorted=False)
        # func = scipy.interpolate.interp1d(xdata, ydata, kind='nearest', assume_sorted=False)
        # interp_vals[interp_mask] = func(pt[interp_mask])
        target_fpr = np.clip(target_fpr, 0, 1)
        interp_tpr = interpolate_replbounds(fpr, tpr, target_fpr)
        choice_tpr = interp_tpr
        choice_fpr = target_fpr
    elif target_tpr is not None:
        target_tpr = np.clip(target_tpr, 0, 1)
        interp_fpr = interpolate_replbounds(tpr, fpr, target_tpr)
        choice_tpr = target_tpr
        choice_fpr = interp_fpr
    else:
        choice_tpr = None
        choice_fpr = None

    if choice_fpr is not None:
        # Recover the score threshold at the first point reaching choice_tpr.
        choice_thresh = 0
        if thresholds is not None:
            try:
                index = np.nonzero(tpr >= choice_tpr)[0][0]
            except IndexError:
                index = len(thresholds) - 1
            choice_thresh = thresholds[index]
        # percent = ut.scalar_str(choice_tpr * 100).split('.')[0]
        # title_suffix = ', FPR%s=%05.2f%%' % (percent, choice_fpr)
        title_suffix = ''
        if show_operating_point:
            title_suffix = ', fpr=%.2f, tpr=%.2f, thresh=%.2f' % (
                choice_fpr,
                choice_tpr,
                choice_thresh,
            )
    else:
        title_suffix = ''

    # if recall_domain is None:
    #     ave_p = np.nan
    # else:
    #     ave_p = p_interp.sum() / p_interp.size

    # Build the title / legend label, embedding the AUC in whichever is used.
    title = 'Receiver operating characteristic'
    if name and not label:
        title += ' (%s)' % (name,)
    if not label:
        title += '\n' + 'AUC=%.3f' % (roc_auc,)
    else:
        label += ' AUC=%.3f' % (roc_auc,)
    title += title_suffix

    label_list = None
    if label:
        label_list = [label]
    pt.multi_plot(
        fpr,
        [tpr],
        label_list=label_list,
        marker=marker,
        color=color,
        fnum=fnum,
        pnum=pnum,
        title=title,
        xlabel='False Positive Rate',
        ylabel='True Positive Rate',
    )
    # pt.plot2(fpr, tpr, marker=marker,
    #          x_label='False Positive Rate',
    #          y_label='True Positive Rate',
    #          unitbox=True, flipx=False, color=color, fnum=fnum, pnum=pnum,
    #          title=title)

    if False:
        # Interp does not work right because of duplicate values
        # in xdomain
        line_ = np.linspace(0.11, 0.9, 20)
        # np.append([np.inf], np.diff(fpr)) > 0
        # np.append([np.inf], np.diff(tpr)) > 0
        unique_tpr_idxs = np.nonzero(np.append([np.inf], np.diff(tpr)) > 0)[0]
        unique_fpr_idxs = np.nonzero(np.append([np.inf], np.diff(fpr)) > 0)[0]
        pt.plt.plot(
            line_,
            interpolate_replbounds(fpr[unique_fpr_idxs], tpr[unique_fpr_idxs], line_),
            'b-x',
        )
        pt.plt.plot(
            interpolate_replbounds(tpr[unique_tpr_idxs], fpr[unique_tpr_idxs], line_),
            line_,
            'r-x',
        )
    if choice_fpr is not None:
        # Mark the chosen operating point on the curve.
        pt.plot(choice_fpr, choice_tpr, 'o', color=pt.PINK)
def draw_precision_recall_curve(
recall_domain, p_interp, title_pref=None, fnum=1, pnum=None, color=None
):
import wbia.plottool as pt
if color is None:
color = (0.4, 1.0, | |
source, destination, extraHopLimit = 0 ):
"""Same as findShortestPaths, but it uses the heuristic path
so it runs much faster. I believe the results should be
the same, but I have not thought about it enough to be able to
be certain about it."""
if not self.neighboursFound:
self.findNeighbours()
if source == destination:
return ( Path( self.nodes[source] ), )
paths = []
shortestPathLength = -1
queue = ListSubclassFifo()
queue.enqueue( HeuristicPath2( self.nodes[source] ) )
# Iterates efficiently (i hope) through the queue
# We dequeue then expand the neighbours:
# This saves a big chunk of RAM, because the neighbours are not
# created until needed, and at the end, lots of paths are thrown away
# It uses less system time, but uses slightly more user time
for c in queue:
for current in c.unvisitedPaths():
#~ print "current path =", self.pathToIndexes( current ),
if ( current.last() == self.nodes[destination] ):
#~ print "valid"
# We found a valid path: add it
paths.append( current )
if ( shortestPathLength == -1 ):
#~ print "shortest path found", len( current )
# This is BFS, so the first path we find is the shortest
shortestPathLength = len( current )
# Go through the queue backwards, and delete anything that is longer than the limit
while len( queue) and len( queue.peekAtEnd() ) +1 > shortestPathLength + extraHopLimit:
queue.dequeueEnd()
elif shortestPathLength == -1 or len( current ) + 1 <= shortestPathLength + extraHopLimit:
#~ print "queued"
# If any other paths will be within the length limit, add to the queue (keep searching)
# testing to make sure that the destination isn't in the visited list was a slight performance LOSS
queue.append( current )
#~ else:
#~ print "discarded"
return paths
    def findShortestPathsOnly( self, source, destination ):
        """Yet another findShortestPaths implementation, but this one
        finds only the shortest paths.

        BFS over unit-cost edges that records, per node, every distinct
        predecessor path of minimal length (path lengths are counted in
        nodes, not hops). Returns a list of Path objects from source to
        destination. NOTE(review): Python 2 code (sys.maxint)."""
        if source == destination:
            return ( Path( self.nodes[source] ), )
        if not self.neighboursFound:
            self.findNeighbours()
        # Make sure all the shortest path data in all the nodes is erased
        for node in self.nodes:
            node.shortestPathLength = sys.maxint
            node.shortestPaths = []
        shortestPathLength = sys.maxint
        # Work with node objects from here on, not indexes.
        source = self.nodes[source]
        destination = self.nodes[destination]
        source.shortestPathLength = 1 # Length is nodes, not hops
        source.shortestPaths = [ ( source, ) ]
        queue = ListSubclassFifo()
        queue.enqueue( source )
        # Iterates efficiently (i hope) through the queue
        for node in queue:
            if node == destination:
                # BFS order guarantees all shortest paths are recorded by now.
                break
            # Check to see if we have a shortest path to our neighbours
            for neighbour in node.neighbours:
                if neighbour.shortestPathLength >= node.shortestPathLength + 1:
                    # The neighbour is already queued if there are shortest paths
                    isQueued = len( neighbour.shortestPaths ) > 0
                    # Verify that my assumptions about the node visiting order is correct
                    if neighbour.shortestPathLength == node.shortestPathLength + 1:
                        assert( len( neighbour.shortestPaths ) > 0 )
                    else:
                        assert( neighbour.shortestPathLength == sys.maxint )
                    # If we found a shorter path to this neighbour,
                    # forget all the other paths
                    if neighbour.shortestPathLength > node.shortestPathLength + 1:
                        assert( neighbour.shortestPathLength == sys.maxint )
                        neighbour.shortestPaths = []
                    neighbour.shortestPathLength = node.shortestPathLength + 1
                    assert( neighbour.shortestPathLength == node.shortestPathLength + 1 )
                    # Extend every shortest path to `node` by this neighbour.
                    for path in node.shortestPaths:
                        assert( neighbour not in path )
                        newPath = path + (neighbour,)
                        neighbour.shortestPaths.append( newPath )
                    # Queue the neighbour to be visited if it isn't already queued
                    if not isQueued:
                        queue.enqueue( neighbour )
        paths = destination.shortestPaths
        # Save some memory: go free all the paths we created.
        for node in self.nodes:
            node.shortestPathLength = sys.maxint
            node.shortestPaths = None
        # Convert the paths into Path objects
        pathObjs = []
        for pathTuple in paths:
            pathObj = Path( pathTuple[0] )
            for node in pathTuple[1:]:
                pathObj.append( node )
            pathObjs.append( pathObj )
        return pathObjs
    def routeAll( self, maxPaths=None ):
        """Floyd-Warshall all pairs shortest path. O(n^3)
        Implemented thanks to: http://ai-depot.com/BotNavigation/Path-AllPairs.html

        Computes all the shortest paths in the network. It will
        only do this computation once, and it is faster to do it for
        the entire network at the same time than it is to do if for
        each pair of nodes in order.

        After calling this, paths can be found via:
        self.nodes[source].routes[destination]

        maxPaths: when not None, caps the number of equal-cost paths kept
        per pair (a random sample of maxPaths is taken whenever more than
        10*maxPaths accumulate). NOTE(review): Python 2 code (xrange,
        sys.maxint)."""
        if not self.neighboursFound:
            self.findNeighbours()
        # Creates an NxN table to store all shortest paths
        pathsTable = [ None ] * len( self.nodes )
        for i in xrange( len( self.nodes ) ):
            pathsTable[i] = [ None ] * len( self.nodes )
        # Initialize the table based on the neighbour information
        for nodeIndex, node in enumerate( self.nodes ):
            for neighbour in node.neighbours:
                neighbourIndex = self.nodes.index( neighbour )
                # We only do part of the matrix, then we copy and reverse it later
                if neighbourIndex > nodeIndex:
                    path = [ node ]
                    path.append( neighbour )
                    pathsTable[ nodeIndex ][ neighbourIndex ] = [ path ]
                    path2 = list( path )
                    path2.reverse()
                    pathsTable[ neighbourIndex ][ nodeIndex ] = [ path2 ]
        # Main Floyd-Warshall relaxation: try detouring i -> k -> j.
        # Costs are path lengths in nodes, so joining shares node k once.
        for k in xrange( len( self.nodes ) ):
            for i in xrange( len( self.nodes ) ):
                # We cannot detour i -> k, therefore, the table will be unchanged
                if pathsTable[i][k] == None:
                    continue
                ikCost = len( pathsTable[i][k][0] )
                for j in xrange( len( self.nodes ) ):
                    # Skip non-paths
                    if i == j: continue
                    if pathsTable[k][j] != None:
                        kjCost = len( pathsTable[k][j][0] )
                        newCost = ikCost + kjCost - 1 # Shared node!
                        ijCost = sys.maxint
                        if pathsTable[i][j] != None:
                            ijCost = len( pathsTable[i][j][0] )
                        if ( newCost <= ijCost ):
                            # The new detour is as good or better than the old one
                            if ( newCost < ijCost ):
                                # We found a cheaper path: forget all those old paths
                                pathsTable[i][j] = []
                            # Extend all the i -> k paths by appending the k -> j paths
                            for ikPath in pathsTable[i][k]:
                                for kjPath in pathsTable[k][j]:
                                    newPath = list( ikPath )
                                    newPath.extend( kjPath[1:] )
                                    assert( len( newPath ) == newCost )
                                    pathsTable[i][j].append( newPath)
                            if maxPaths != None and len( pathsTable[i][j] ) > maxPaths*10:
                                #~ print "pathsTable[%d][%d] has %d paths" % ( i, j, len( pathsTable[i][j] ) )
                                # Select a subset of the paths to be kept, the rest will be discarded
                                pathsTable[i][j] = random.sample( pathsTable[i][j], maxPaths )
        # Convert the raw node lists into Path objects and hand them to the
        # source nodes; the table entry is dropped as soon as it is converted
        # to free memory.
        for source in xrange( len( self.nodes ) ):
            for destination in xrange( len( self.nodes ) ):
                pathObjs = None
                if source == destination:
                    pathObjs = ( Path( self.nodes[source] ), )
                else:
                    #~ print "%d -> %d: %d paths" % ( source, destination, len( pathsTable[source][destination] ) )
                    pathObjs = []
                    for pathlist in pathsTable[source][destination]:
                        pathObj = Path( pathlist[0] )
                        for node in pathlist[1:]:
                            pathObj.append( node )
                        pathObjs.append( pathObj )
                    #~ print self.pathToIndexes( path )
                self.nodes[source].routes[destination] = pathObjs
                pathsTable[source][destination] = None
        #~ self.nodes[source].routes[destination] = pathsTable[source][destination]
def pathToIndexes( self, path ):
"""Converts a path object into a list of node indexes."""
indexes = []
for node in path.path:
indexes.append( self.nodes.index( node ) )
return indexes
def drawScript( self, paths=None ):
"""Returns a list of node coordinates in the format for my
network topology drawing program (drawnetwork)."""
output = "area\t%f\t%f\n\n" % ( self.size[0], self.size[1] )
for node in self.nodes:
output += "node\t%f\t%f\n" % ( node.x, node.y )
output += "\n"
if paths:
for path in paths:
output += "route\t"
output += "\t".join( [ str( index ) for index in self.pathToIndexes( path ) ] )
output += "\n"
return output
class NS2ScriptBuilder:
def __init__( self, network ):
self.topologyToNsIndex = {}
self.routeData = ''
self.network = network
self.numFlows = 0
    ## Returns the header of the ns2 file with the topology of the network.
    ## Routing must be added separately.
    def getScript( self ):
        """Assemble and return the full ns2 simulation script (TCL).

        The script is a fixed template (simulation options, tracing setup,
        node creation, topography) %-formatted with the node count and the
        area dimensions, followed by per-node coordinates, the accumulated
        routing/flow commands in ``self.routeData``, and the footer from
        ``self.ns2Footer()``.

        NOTE(review): the template's "val(ll)" line looks garbled
        ("link laset val(chan) ..."); it sits inside a TCL comment so it
        should be cosmetic only -- confirm against the original ns2 example.
        """
        ns2File = """
# ======================================================================
# Define options
# ======================================================================
set val(chan) Channel/WirelessChannel ;# channel type
set val(prop) Propagation/TwoRayGround ;# radio-propagation model
set val(netif) Phy/WirelessPhy ;# network interface type
set val(mac) Mac/802_11 ;# MAC type
set val(ifq) Queue/DropTail/PriQueue ;# interface queue type
set val(ll) LL ;# link laset val(chan) Channel/WirelessChannel ;# channel type
set val(ant) Antenna/OmniAntenna ;# antenna model
set val(ifqlen) 50 ;# max packet in ifq
set val(nn) %d ;# Number of nodes to create
set val(rp) DSDV ;# routing protocol
if { $argc != 3 && $argc != 2 } {
puts "ERROR: Supply file.tcl <random seed> (<packet interval>) <trace file>"
exit 1
}
ns-random [lindex $argv 0]
set packetInterval [lindex $argv 1]
set tracefile [lindex $argv [expr $argc-1]]
set ns_ [new Simulator]
$ns_ use-newtrace
set tracefd [open $tracefile w]
$ns_ trace-all $tracefd
Mac/802_11 set dataRate_ 1Mb
#Mac/802_11 set CWMax_ 31
# Disable RTS
Mac/802_11 set RTSThreshold_ 3000
# Switch to the short preamble
#Mac/802_11 set PreambleLength_ 72
Application/Traffic/CBR set random_ 2 ;# Specifies the "small random variation" setting
# set up topography object
set topo [new Topography]
$topo load_flatgrid %d %d
# Create GOD object
create-god $val(nn)
# Create a new channel
set channel1_ [new $val(chan)]
# configure node
$ns_ node-config -adhocRouting $val(rp) \
-llType $val(ll) \
-macType $val(mac) \
-ifqType $val(ifq) \
-ifqLen $val(ifqlen) \
-antType $val(ant) \
-propType $val(prop) \
-phyType $val(netif) \
-channel $channel1_ \
-topoInstance $topo \
-agentTrace ON \
-routerTrace ON \
-macTrace OFF \
-movementTrace OFF
for {set i 0} {$i < $val(nn) } {incr i} {
set node_($i) [$ns_ node]
$node_($i) random-motion 0 ;# disable random motion
$node_($i) set X_ 0.0
$node_($i) set Y_ 0.0
$node_($i) set Z_ 0.0
}
proc stop {} {
global ns_ tracefd
$ns_ flush-trace
close $tracefd
$ns_ halt
}
""" % ( len( self.topologyToNsIndex ), self.network.size[0], self.network.size[1] )
        # The parameters are number of nodes, X x Y dimensions,
        # Place the nodes
        for node, index in self.topologyToNsIndex.iteritems():  # Python 2 dict iteration
            ns2File += "$node_(%d) set X_ %f\n$node_(%d) set Y_ %f\n" % ( index, node.x, index, node.y )
        ns2File += self.routeData
        ns2File += self.ns2Footer()
        return ns2File
def ns2UDPAgent( self ):
return """
# Setup traffic flow
set agent(%d) [new Agent/UDP]
$agent(%d) set packetSize_ 1500
set app(%d) [new | |
/ 2) + 30)
y = 0
while left > 0 :
if getTile(x + dX, y) != Tiles.NOTHING:
setTile(x + dX, y, Tiles.SAND)
left -= 1
y += 1
print("Rocks in dirt")
for i in range(1000):
x = random.randrange(0, WORLD_WIDTH)
y = random.randrange(0, WORLD_HEIGHT // 2.8)
if getTile(x, y) == Tiles.DIRT:
clump(x, y, poisson(10), Tiles.STONE, True, 0, 0)
print("Dirt in rocks")
for i in range(3000):
x = random.randrange(0, WORLD_WIDTH)
y = random.randrange(WORLD_HEIGHT // 3.2, WORLD_HEIGHT)
if getTile(x, y) == Tiles.STONE:
clump(x, y, poisson(10), Tiles.DIRT, True, 0, 0)
print("Clay")
for i in range(1000):
x = random.randrange(0, WORLD_WIDTH)
y = random.randrange(0, WORLD_HEIGHT // 4.2)
if getTile(x, y) == Tiles.DIRT:
clump(x, y, poisson(10), Tiles.CLAY, True, 0, 0)
print("Small holes")
for i in range(50):
x = random.randrange(0, WORLD_WIDTH)
y = random.randrange(WORLD_HEIGHT // 3.2, WORLD_HEIGHT)
clump(x, y, poisson(50), Tiles.WATER, False, 0, 0)
for i in range(150):
x = random.randrange(0, WORLD_WIDTH)
y = random.randrange(WORLD_HEIGHT // 3.2, WORLD_HEIGHT)
clump(x, y, poisson(50), Tiles.NOTHING, True, 0, 0)
print("Grass")
for x in range(WORLD_WIDTH):
for y in range(int(WORLD_HEIGHT // 2.8)):
if getTile(x, y) == Tiles.DIRT:
setTile(x, y, Tiles.GRASS)
if getTile(x - 1, y) == Tiles.NOTHING or getTile(x + 1, y) == Tiles.NOTHING:
setTile(x, y + 1, Tiles.GRASS)
break
elif getTile(x, y) != Tiles.NOTHING:
break
for x in range(WORLD_WIDTH):
for y in range(int(WORLD_HEIGHT // 2.8)):
if getTile(x, y) == Tiles.DIRT and (
getTile(x - 1, y) == Tiles.NOTHING
or getTile(x + 1, y) == Tiles.NOTHING
or getTile(x, y - 1) == Tiles.NOTHING
or getTile(x, y + 1) == Tiles.NOTHING):
setTile(x, y, Tiles.GRASS)
print("Jungle")
width = poisson(160)
x = random.choice([WORLD_WIDTH // 5, WORLD_WIDTH - WORLD_WIDTH // 5 - width])
for y in range(WORLD_HEIGHT):
dX = 0
pX = int(perlin_gud(y / 60.0, 6.17) * 30.0)
random.seed(y)
while getTile(x + dX + pX, y) == Tiles.NOTHING:
dX += 1
while dX < width:
tile = getTile(x + dX + pX, y)
if dX < 20 or width - dX < 20:
num = (20 - dX) if dX < 20 else (20 - (width - dX))
if random.randrange(num) > 5:
dX += 1
continue
if tile == Tiles.DIRT or tile == Tiles.STONE or tile == Tiles.SAND:
setTile(x + dX + pX, y, Tiles.MUD)
elif tile == Tiles.GRASS:
setTile(x + dX + pX, y, Tiles.JUNGLEGRASS)
dX += 1
for dX in range(width):
for y in range(WORLD_HEIGHT):
pX = int(perlin_gud(y / 60.0, 6.17) * 30.0)
if getTile(x + dX + pX, y) == Tiles.MUD and (
getTile(x + dX + pX - 1, y) == Tiles.NOTHING
or getTile(x + dX + pX + 1, y) == Tiles.NOTHING
or getTile(x + dX + pX, y - 1) == Tiles.NOTHING
or getTile(x + dX + pX, y + 1) == Tiles.NOTHING):
setTile(x + dX + pX, y, Tiles.JUNGLEGRASS)
print("Snow Biome")
width = poisson(110)
if (x < WORLD_WIDTH // 2):
x = WORLD_WIDTH - WORLD_WIDTH // 5 - width
else:
x = WORLD_WIDTH // 5
for y in range(int(WORLD_HEIGHT / 1.5)):
dX = 0
pX = int(perlin_gud(y / 60.0, 6.17) * 20.0)
random.seed(y)
while getTile(x + dX + pX, y) == Tiles.NOTHING:
dX += 1
while dX < width:
tile = getTile(x + dX + pX, y)
if dX < 20 or width - dX < 20:
num = (20 - dX) if dX < 20 else (20 - (width - dX))
if random.randrange(num) > 5:
dX += 1
continue
if ((int(WORLD_HEIGHT / 1.5) - y) < 20):
num = 20 - (int(WORLD_HEIGHT / 1.5) - y)
if random.randrange(num) > 5:
dX += 1
continue
if tile == Tiles.STONE:
setTile(x + dX + pX, y, Tiles.ICE)
elif tile != Tiles.NOTHING: # == Tiles.DIRT or tile == Tiles.GRASS or tile == Tiles.SAND
setTile(x + dX + pX, y, Tiles.SNOW)
dX += 1
for dX in range(width):
for y in range(int(WORLD_HEIGHT / 1.5)):
pX = int(perlin_gud(y / 60.0, 6.17) * 20.0)
if getTile(x + dX + pX, y) == Tiles.ICE and (
getTile(x + dX + pX - 1, y) == Tiles.NOTHING
or getTile(x + dX + pX + 1, y) == Tiles.NOTHING
or getTile(x + dX + pX, y - 1) == Tiles.NOTHING
or getTile(x + dX + pX, y + 1) == Tiles.NOTHING) and False:
setTile(x + dX + pX, y, Tiles.SNOW)
print("Shinies")
for i in range(750):
x = random.randrange(WORLD_WIDTH)
y = random.randrange(WORLD_HEIGHT)
if getTile(x, y) != Tiles.SAND and getTile(x, y) != Tiles.SNOW and getTile(x, y) != Tiles.ICE:
clump(x, y, poisson(6), Tiles.IRON, True, 0, 0)
if random.randrange(0, 2) == 0:
for i in range(750):
x = random.randrange(WORLD_WIDTH)
y = random.randrange(WORLD_HEIGHT)
if getTile(x, y) != Tiles.SAND and getTile(x, y) != Tiles.SNOW and getTile(x, y) != Tiles.ICE:
clump(x, y, poisson(8), Tiles.COPPER, True, 0, 0)
else:
for i in range(750):
x = random.randrange(WORLD_WIDTH)
y = random.randrange(WORLD_HEIGHT)
if getTile(x, y) != Tiles.SAND and getTile(x, y) != Tiles.SNOW and getTile(x, y) != Tiles.ICE:
clump(x, y, poisson(10), Tiles.TIN, True, 0, 0)
print("Lakes")
for i in range(0): # max(2, poisson(4)), lakes are causing some problems rn
x = random.randrange(75, WORLD_WIDTH - 75)
width = max(15, poisson(15))
depth = max(5, poisson(10))
leftY = WORLD_HEIGHT // 5
while getTile(x, leftY) == Tiles.NOTHING:
leftY += 1
if getTile(x, leftY + 6) == Tiles.NOTHING: leftY += 6
rightY = WORLD_HEIGHT // 5
while getTile(x + width, rightY) == Tiles.NOTHING:
rightY += 1
if getTile(x + width, rightY + 6) == Tiles.NOTHING: rightY += 6
y = max(leftY, rightY)
parabola(x, y, width, depth, Tiles.WATER, 2)
print("Beaches")
leftY = 0
while getTile(60, leftY) == Tiles.NOTHING:
leftY += 1
rightY = 0
while getTile(WORLD_WIDTH - 60, rightY) == Tiles.NOTHING:
rightY += 1
# Left beach
parabola(-62, leftY, 124, 32, Tiles.DIRT, 2)
parabola(-60, leftY, 120, 30, Tiles.SAND, 2)
parabola(-50, leftY + 2, 100, 20, Tiles.WATER, 2)
# Right beach
parabola(WORLD_WIDTH - 62, rightY, 124, 32, Tiles.DIRT, 2)
parabola(WORLD_WIDTH - 60, rightY, 120, 30, Tiles.SAND, 2)
parabola(WORLD_WIDTH - 50, rightY + 2, 100, 20, Tiles.WATER, 2)
print("Gravitating Sand")
for x in range(WORLD_WIDTH):
for y in range(WORLD_HEIGHT, 0, -1):
if getTile(x, y) == Tiles.SAND and getTile(x, y + 1) == Tiles.NOTHING:
tempY = y
while tempY < WORLD_HEIGHT - 1 and getTile(x, tempY + 1) == Tiles.NOTHING:
tempY += 1
setTile(x, tempY, Tiles.SAND)
setTile(x, y, Tiles.NOTHING)
print("Wet Jungle")
for x in range(1, WORLD_WIDTH):
for y in range(WORLD_HEIGHT // 6, WORLD_HEIGHT // 2):
if getTile(x, y) == Tiles.MUD:
if getTile(x + 1, y) == Tiles.NOTHING and random.randrange(0, 3) == 0:
setTile(x + 1, y, Tiles.WATER)
if getTile(x - 1, y) == Tiles.NOTHING and random.randrange(0, 3) == 0:
setTile(x - 1, y, Tiles.WATER)
if getTile(x, y - 1) == Tiles.NOTHING and random.randrange(0, 3) == 0:
setTile(x , y - 1, Tiles.WATER)
ySave = 0
print("Smooth World")
for i in range(WORLD_SMOOTH_PASSES):
for passDir in range(2):
for y in range(WORLD_HEIGHT):
if getTile((WORLD_WIDTH - 1) if passDir else 0, y) != Tiles.NOTHING:
ySave = y
break
for x in range((WORLD_WIDTH - 1) if passDir else 0, 0 if passDir else WORLD_WIDTH, -1 if passDir else 1):
y = 0
while getTile(x, y) == Tiles.NOTHING:
y += 1
deltaY = ySave - y
tile = getTile(x, y)
if deltaY > (2 if tile != Tiles.SAND else 1) and getTile(x, y + 6) != Tiles.NOTHING:
for dY in range(min(deltaY - 1, 2)):
setTile(x, y + dY, Tiles.NOTHING)
ySave = y + deltaY - 1
else:
ySave = y
print("Cobwebs")
for i in range(250):
for tries in range(1000):
x = random.randrange(0, WORLD_WIDTH)
y = random.randrange(WORLD_HEIGHT // 2.4, WORLD_HEIGHT - (WORLD_HEIGHT // 6.8))
if getTile(x, y) != Tiles.NOTHING and getTile(x, y) != Tiles.VINE and getTile(x, y) != Tiles.COBWEB:
continue
canPlace = 0
new_y = y
for j in range(y - 1, -1, -1):
if(getTile(x, j) != Tiles.NOTHING):
canPlace = 1
break
else:
new_y = j
if(canPlace):
clump(x, new_y, poisson(9), Tiles.COBWEB, False, 0, 0)
break
print("Life | |
# jiant/tasks/lib/templates/shared.py
import numpy as np
from dataclasses import dataclass
from typing import List, NamedTuple, Tuple
from jiant.tasks.core import FeaturizationSpec
from jiant.tasks.utils import truncate_sequences, pad_to_max_seq_length
from jiant.utils.python.datastructures import BiMap
# Maximum number of sub tokens kept per AMR concept / relation label
# (enforced in construct_double_input_amr_concepts_and_relations).
MAX_SUB_TOKEN_LENGTH = 5
# Cap applied via truncate_sequences to the concept lists of the two AMRs.
MAX_CONCEPT_LENGTH = 512
# Cap applied via truncate_sequences to the relation id/label lists.
MAX_RELATION_LENGTH = 512
class Span(NamedTuple):
    """Half-open token span: ``start`` inclusive, ``end`` exclusive."""

    start: int
    end: int  # Use exclusive end, for consistency

    def add(self, i: int):
        """Return a new Span shifted right by *i* positions."""
        return Span(self.start + i, self.end + i)

    def to_slice(self):
        """Return the equivalent builtin ``slice`` object."""
        return slice(self.start, self.end)

    def to_array(self):
        """Return the span endpoints as a length-2 numpy array."""
        return np.array(tuple(self))
@dataclass
class UnpaddedInputs:
    """Token and segment-id sequences before padding (see add_cls_token)."""

    unpadded_tokens: List  # token strings, special tokens already inserted
    unpadded_segment_ids: List  # per-token segment ids, aligned with unpadded_tokens
    cls_offset: int  # 0 when the class token is appended, 1 when it is prepended
@dataclass
class UnpaddedAMRInputs:
    """Merged AMR inputs for a sentence pair, before padding."""

    unpadded_concepts: List[List[str]]  # sub tokens per concept, AMR b appended after AMR a
    unpadded_relation_ids: List[Tuple[int, int]]  # (source, target) concept indices
    unpadded_relation_labels: List[List[str]]  # sub tokens per relation label
@dataclass
class InputSet:
    """Padded model inputs (built by create_input_set_from_tokens_and_segments)."""

    input_ids: List  # token ids from tokenizer.convert_tokens_to_ids
    input_mask: List  # 1 for real tokens; padding appended by pad_features_with_feat_spec (not shown)
    segment_ids: List  # per-position segment ids
@dataclass
class AMRInputSet:
    """Padded AMR model inputs (concepts, relations, and their masks)."""

    concept_sub_token_ids: List[List[int]]  # sub-token ids per concept
    concept_sub_token_mask: List[List[int]]  # mask over concept sub tokens
    relation_ids: List[Tuple[int, int]]  # (source, target) concept indices
    relation_id_mask: List[int]  # mask over relations
    relation_label_sub_token_ids: List[List[int]]  # sub-token ids per relation label
    relation_label_sub_token_mask: List[List[int]]  # mask over relation-label sub tokens
def single_sentence_featurize(
    guid: str,
    input_tokens: List[str],
    label_id: int,
    tokenizer,
    feat_spec: FeaturizationSpec,
    data_row_class,
):
    """Featurize a single-segment example and return it as a DataRow.

    Args:
        guid (str): human-readable identifier for interpretability and debugging.
        input_tokens (List[str]): sequence of tokens for the single segment.
        label_id (int): int representing the label for the task.
        tokenizer:
        feat_spec (FeaturizationSpec): Tokenization-related metadata.
        data_row_class (DataRow): DataRow class used in the task.

    Returns:
        DataRow representing an example.
    """
    # Truncation and special-token insertion happen here; padding is done
    # inside create_generic_data_row_from_tokens_and_segments.
    prepared = construct_single_input_tokens_and_segment_ids(
        input_tokens=input_tokens, tokenizer=tokenizer, feat_spec=feat_spec,
    )
    return create_generic_data_row_from_tokens_and_segments(
        guid=guid,
        unpadded_tokens=prepared.unpadded_tokens,
        unpadded_segment_ids=prepared.unpadded_segment_ids,
        label_id=label_id,
        tokenizer=tokenizer,
        feat_spec=feat_spec,
        data_row_class=data_row_class,
    )
def double_sentence_featurize(
    guid: str,
    input_tokens_a: List[str],
    input_tokens_b: List[str],
    label_id: int,
    tokenizer,
    feat_spec: FeaturizationSpec,
    data_row_class,
):
    """Featurize a two-segment example and return it as a DataRow.

    Args:
        guid (str): human-readable identifier for interpretability and debugging.
        input_tokens_a (List[str]): sequence of tokens in segment a.
        input_tokens_b (List[str]): sequence of tokens in segment b.
        label_id (int): int representing the label for the task.
        tokenizer:
        feat_spec (FeaturizationSpec): Tokenization-related metadata.
        data_row_class (DataRow): DataRow class used in the task.

    Returns:
        DataRow representing an example.
    """
    # Truncation and special-token insertion happen here; padding is done
    # inside create_generic_data_row_from_tokens_and_segments.
    prepared = construct_double_input_tokens_and_segment_ids(
        input_tokens_a=input_tokens_a,
        input_tokens_b=input_tokens_b,
        tokenizer=tokenizer,
        feat_spec=feat_spec,
    )
    return create_generic_data_row_from_tokens_and_segments(
        guid=guid,
        unpadded_tokens=prepared.unpadded_tokens,
        unpadded_segment_ids=prepared.unpadded_segment_ids,
        label_id=label_id,
        tokenizer=tokenizer,
        feat_spec=feat_spec,
        data_row_class=data_row_class,
    )
def double_sentence_with_amr_featurize(
    guid: str,
    input_tokens_a: List[str],
    input_amr_concepts_a: List[List[str]],
    input_amr_relation_ids_a: List[Tuple[int, int]],
    input_amr_relation_labels_a: List[List[str]],
    input_tokens_b: List[str],
    input_amr_concepts_b: List[List[str]],
    input_amr_relation_ids_b: List[Tuple[int, int]],
    input_amr_relation_labels_b: List[List[str]],
    label_id: int,
    tokenizer,
    feat_spec: FeaturizationSpec,
    data_row_class,
):
    """Featurize a two-segment example with AMR graphs and return a DataRow.

    Args:
        guid (str): human-readable identifier for interpretability and debugging.
        input_tokens_a (List[str]): sequence of tokens in segment a.
        input_amr_concepts_a (List[List[str]]): sub tokens of concepts in AMR a.
        input_amr_relation_ids_a (List[(int, int)]): (source, target) concept
            indices for relations in AMR a.
        input_amr_relation_labels_a (List[List[str]]): sub tokens of relation labels in AMR a.
        input_tokens_b (List[str]): sequence of tokens in segment b.
        input_amr_concepts_b (List[List[str]]): sub tokens of concepts in AMR b.
        input_amr_relation_ids_b (List[(int, int)]): (source, target) concept
            indices for relations in AMR b.
        input_amr_relation_labels_b (List[List[str]]): sub tokens of relation labels in AMR b.
        label_id (int): int representing the label for the task.
        tokenizer:
        feat_spec (FeaturizationSpec): Tokenization-related metadata.
        data_row_class (DataRow): DataRow class used in the task.

    Returns:
        DataRow representing an example.
    """
    # Text and AMR sides are prepared independently, then packed together.
    prepared_text = construct_double_input_tokens_and_segment_ids(
        input_tokens_a=input_tokens_a,
        input_tokens_b=input_tokens_b,
        tokenizer=tokenizer,
        feat_spec=feat_spec,
    )
    prepared_amr = construct_double_input_amr_concepts_and_relations(
        input_amr_concepts_a=input_amr_concepts_a,
        input_amr_relation_ids_a=input_amr_relation_ids_a,
        input_amr_relation_labels_a=input_amr_relation_labels_a,
        input_amr_concepts_b=input_amr_concepts_b,
        input_amr_relation_ids_b=input_amr_relation_ids_b,
        input_amr_relation_labels_b=input_amr_relation_labels_b,
        tokenizer=tokenizer,
        feat_spec=feat_spec,
    )
    return create_generic_data_row_with_amr(
        guid=guid,
        unpadded_tokens=prepared_text.unpadded_tokens,
        unpadded_segment_ids=prepared_text.unpadded_segment_ids,
        unpadded_concepts=prepared_amr.unpadded_concepts,
        unpadded_relation_ids=prepared_amr.unpadded_relation_ids,
        unpadded_relation_labels=prepared_amr.unpadded_relation_labels,
        label_id=label_id,
        tokenizer=tokenizer,
        feat_spec=feat_spec,
        data_row_class=data_row_class,
    )
def construct_single_input_tokens_and_segment_ids(
    input_tokens: List[str], tokenizer, feat_spec: FeaturizationSpec
):
    """Truncate a single segment, append [SEP], and delegate [CLS] placement.

    Args:
        input_tokens (List[str]): sequence of tokens for the single segment.
        tokenizer:
        feat_spec (FeaturizationSpec): Tokenization-related metadata.

    Returns:
        UnpaddedInputs: truncated tokens with special tokens and segment ids.
    """
    # Reserve room for the two special tokens: CLS and SEP.
    max_tokens = feat_spec.max_seq_length - 2
    (truncated,) = truncate_sequences(tokens_ls=[input_tokens], max_length=max_tokens)
    tokens_with_sep = truncated + [tokenizer.sep_token]
    # Every position, including the trailing SEP, belongs to segment a.
    segment_ids = [feat_spec.sequence_a_segment_id] * len(tokens_with_sep)
    return add_cls_token(
        unpadded_tokens=tokens_with_sep,
        unpadded_segment_ids=segment_ids,
        tokenizer=tokenizer,
        feat_spec=feat_spec,
    )
def construct_double_input_tokens_and_segment_ids(
    input_tokens_a: List[str], input_tokens_b: List[str], tokenizer, feat_spec: FeaturizationSpec
):
    """Build token and segment-id sequences for a sentence pair.

    Truncates both segments jointly, inserts separator tokens (with the
    optional extra SEP used by some models), and delegates class-token
    placement to add_cls_token.

    Args:
        input_tokens_a (List[str]): sequence of tokens in segment a.
        input_tokens_b (List[str]): sequence of tokens in segment b.
        tokenizer:
        feat_spec (FeaturizationSpec): Tokenization-related metadata.

    Returns:
        UnpaddedInputs: unpadded inputs with truncation applied and special tokens appended.
    """
    extra_sep = [tokenizer.sep_token] if feat_spec.sep_token_extra else []
    extra_sep_segment = [feat_spec.sequence_a_segment_id] if feat_spec.sep_token_extra else []
    # CLS + SEP (+ optional extra SEP) + SEP.
    num_special = 3 + len(extra_sep)
    input_tokens_a, input_tokens_b = truncate_sequences(
        tokens_ls=[input_tokens_a, input_tokens_b],
        max_length=feat_spec.max_seq_length - num_special,
    )
    tokens = (
        input_tokens_a
        + [tokenizer.sep_token]
        + extra_sep
        + input_tokens_b
        + [tokenizer.sep_token]
    )
    # Segment a covers its tokens plus the first SEP (and any extra SEP);
    # segment b covers its tokens plus the final SEP.
    segment_ids = (
        [feat_spec.sequence_a_segment_id] * (len(input_tokens_a) + 1)
        + extra_sep_segment
        + [feat_spec.sequence_b_segment_id] * (len(input_tokens_b) + 1)
    )
    return add_cls_token(
        unpadded_tokens=tokens,
        unpadded_segment_ids=segment_ids,
        tokenizer=tokenizer,
        feat_spec=feat_spec,
    )
def construct_double_input_amr_concepts_and_relations(
    input_amr_concepts_a: List[List[str]],
    input_amr_relation_ids_a: List[Tuple[int, int]],
    input_amr_relation_labels_a: List[List[str]],
    input_amr_concepts_b: List[List[str]],
    input_amr_relation_ids_b: List[Tuple[int, int]],
    input_amr_relation_labels_b: List[List[str]],
    tokenizer,
    feat_spec: FeaturizationSpec,
):
    """Merge concepts, relation ids and labels from two AMRs, applying truncation.

    Steps: (1) cap each concept / relation label at MAX_SUB_TOKEN_LENGTH sub
    tokens; (2) cap the concept lists and drop relations that touch removed
    concepts; (3) append AMR b after AMR a, offsetting b's relation endpoints
    by the number of concepts kept from a.

    Args:
        input_amr_concepts_a (List[List[str]]): sub tokens of concepts in AMR a.
        input_amr_relation_ids_a (List[(int, int)]): (source, target) concept
            indices for relations in AMR a.
        input_amr_relation_labels_a (List[List[str]]): sub tokens of relation labels in AMR a.
        input_amr_concepts_b (List[List[str]]): sub tokens of concepts in AMR b.
        input_amr_relation_ids_b (List[(int, int)]): (source, target) concept
            indices for relations in AMR b.
        input_amr_relation_labels_b (List[List[str]]): sub tokens of relation labels in AMR b.
        tokenizer:
        feat_spec (FeaturizationSpec): Tokenization-related metadata.

    Returns:
        UnpaddedAMRInputs: unpadded merged AMR inputs.
    """
    def _cap_sub_tokens(sequences):
        # truncate_sequences returns a one-element list per call; flatten them.
        return sum([truncate_sequences(tokens_ls=[seq], max_length=MAX_SUB_TOKEN_LENGTH)
                    for seq in sequences], [])

    concepts_a = _cap_sub_tokens(input_amr_concepts_a)
    concepts_b = _cap_sub_tokens(input_amr_concepts_b)
    labels_a = _cap_sub_tokens(input_amr_relation_labels_a)
    labels_b = _cap_sub_tokens(input_amr_relation_labels_b)
    concepts_a, concepts_b = truncate_sequences(
        tokens_ls=[concepts_a, concepts_b], max_length=MAX_CONCEPT_LENGTH)
    len_a = len(concepts_a)
    len_b = len(concepts_b)
    kept_ids_a = []
    kept_labels_a = []
    for relation_id, label in zip(input_amr_relation_ids_a, labels_a):
        source, target = relation_id
        # Keep only relations whose endpoints survived concept truncation.
        if source < len_a and target < len_a:
            kept_ids_a.append(relation_id)
            kept_labels_a.append(label)
    kept_ids_b = []
    kept_labels_b = []
    for relation_id, label in zip(input_amr_relation_ids_b, labels_b):
        source, target = relation_id
        if source < len_b and target < len_b:
            # b's endpoints shift because its concepts follow a's in the merge.
            kept_ids_b.append([source + len_a, target + len_a])
            kept_labels_b.append(label)
    kept_ids_a, kept_ids_b = truncate_sequences(
        tokens_ls=[kept_ids_a, kept_ids_b],
        max_length=MAX_RELATION_LENGTH)
    kept_labels_a, kept_labels_b = truncate_sequences(
        tokens_ls=[kept_labels_a, kept_labels_b],
        max_length=MAX_RELATION_LENGTH)
    return UnpaddedAMRInputs(
        unpadded_concepts=concepts_a + concepts_b,
        unpadded_relation_ids=kept_ids_a + kept_ids_b,
        unpadded_relation_labels=kept_labels_a + kept_labels_b,
    )
def add_cls_token(
    unpadded_tokens: List[str],
    unpadded_segment_ids: List[int],
    tokenizer,
    feat_spec: FeaturizationSpec,
):
    """Attach the class token to unpadded inputs.

    Places [CLS] at the end or the start of the sequences depending on
    ``feat_spec.cls_token_at_end``, and records the resulting offset.

    Args:
        unpadded_tokens (List[str]): sequence of unpadded token strings.
        unpadded_segment_ids (List[int]): sequence of unpadded segment ids.
        tokenizer:
        feat_spec (FeaturizationSpec): Tokenization-related metadata.

    Returns:
        UnpaddedInputs: unpadded inputs with class token added.
    """
    cls_tokens = [tokenizer.cls_token]
    cls_segments = [feat_spec.cls_token_segment_id]
    if feat_spec.cls_token_at_end:
        # Appending leaves the original token positions unchanged.
        return UnpaddedInputs(
            unpadded_tokens=unpadded_tokens + cls_tokens,
            unpadded_segment_ids=unpadded_segment_ids + cls_segments,
            cls_offset=0,
        )
    # Prepending shifts every original token one position to the right.
    return UnpaddedInputs(
        unpadded_tokens=cls_tokens + unpadded_tokens,
        unpadded_segment_ids=cls_segments + unpadded_segment_ids,
        cls_offset=1,
    )
def create_generic_data_row_from_tokens_and_segments(
    guid: str,
    unpadded_tokens: List[str],
    unpadded_segment_ids: List[int],
    label_id: int,
    tokenizer,
    feat_spec: FeaturizationSpec,
    data_row_class,
):
    """Pad the inputs and pack them into the task's DataRow class.

    Args:
        guid (str): human-readable identifier (for interpretability and debugging).
        unpadded_tokens (List[str]): sequence of unpadded token strings.
        unpadded_segment_ids (List[int]): sequence of unpadded segment ids.
        label_id (int): int representing the label for the task.
        tokenizer:
        feat_spec (FeaturizationSpec): Tokenization-related metadata.
        data_row_class (DataRow): data row class to wrap and return the inputs.

    Returns:
        DataRow: data row class containing model inputs.
    """
    padded = create_input_set_from_tokens_and_segments(
        unpadded_tokens=unpadded_tokens,
        unpadded_segment_ids=unpadded_segment_ids,
        tokenizer=tokenizer,
        feat_spec=feat_spec,
    )
    # Arrays for the model; the raw (unpadded) tokens ride along for debugging.
    return data_row_class(
        guid=guid,
        input_ids=np.array(padded.input_ids),
        input_mask=np.array(padded.input_mask),
        segment_ids=np.array(padded.segment_ids),
        label_id=label_id,
        tokens=unpadded_tokens,
    )
def create_generic_data_row_with_amr(
    guid: str,
    unpadded_tokens: List[str],
    unpadded_segment_ids: List[int],
    unpadded_concepts: List[List[str]],
    unpadded_relation_ids: List[Tuple[int, int]],
    unpadded_relation_labels: List[List[str]],
    label_id: int,
    tokenizer,
    feat_spec: FeaturizationSpec,
    data_row_class,
):
    """Pad text and AMR inputs and pack them into the task's DataRow class.

    Args:
        guid (str): human-readable identifier (for interpretability and debugging).
        unpadded_tokens (List[str]): sequence of unpadded token strings.
        unpadded_segment_ids (List[int]): sequence of unpadded segment ids.
        unpadded_concepts (List[List[str]]): unpadded sub tokens of AMR concepts.
        unpadded_relation_ids (List[(int, int)]): unpadded (source, target)
            concept indices for AMR relations.
        unpadded_relation_labels (List[List[str]]): unpadded sub tokens of AMR relation labels.
        label_id (int): int representing the label for the task.
        tokenizer:
        feat_spec (FeaturizationSpec): Tokenization-related metadata.
        data_row_class (DataRow): data row class to wrap and return the inputs.

    Returns:
        DataRow: data row class containing model inputs.
    """
    padded_text = create_input_set_from_tokens_and_segments(
        unpadded_tokens=unpadded_tokens,
        unpadded_segment_ids=unpadded_segment_ids,
        tokenizer=tokenizer,
        feat_spec=feat_spec,
    )
    padded_amr = create_amr_input_set(
        unpadded_concepts=unpadded_concepts,
        unpadded_relation_ids=unpadded_relation_ids,
        unpadded_relation_labels=unpadded_relation_labels,
        tokenizer=tokenizer,
        feat_spec=feat_spec,
    )
    return data_row_class(
        guid=guid,
        input_ids=np.array(padded_text.input_ids),
        input_mask=np.array(padded_text.input_mask),
        segment_ids=np.array(padded_text.segment_ids),
        input_concept_ids=np.array(padded_amr.concept_sub_token_ids),
        input_concept_mask=np.array(padded_amr.concept_sub_token_mask),
        input_relation_ids=np.array(padded_amr.relation_ids),
        input_relation_id_mask=np.array(padded_amr.relation_id_mask),
        input_relation_label_ids=np.array(padded_amr.relation_label_sub_token_ids),
        input_relation_label_mask=np.array(padded_amr.relation_label_sub_token_mask),
        label_id=label_id,
        tokens=unpadded_tokens,
    )
def create_input_set_from_tokens_and_segments(
    unpadded_tokens: List[str],
    unpadded_segment_ids: List[int],
    tokenizer,
    feat_spec: FeaturizationSpec,
):
    """Create padded inputs for model.

    Converts tokens to ids, makes input set (input ids, input mask, and segment ids), adds padding.

    Args:
        unpadded_tokens (List[str]): unpadded list of token strings.
        unpadded_segment_ids (List[int]): unpadded list of segment ids.
        tokenizer:
        feat_spec (FeaturizationSpec): Tokenization-related metadata.

    Returns:
        Padded input set.

    Raises:
        ValueError: if tokens and segment ids differ in length.
    """
    # Raise explicitly instead of using `assert`, which is stripped under
    # `python -O` and gives an uninformative error message.
    if len(unpadded_tokens) != len(unpadded_segment_ids):
        raise ValueError(
            "Mismatched lengths: %d tokens vs %d segment ids"
            % (len(unpadded_tokens), len(unpadded_segment_ids))
        )
    input_ids = tokenizer.convert_tokens_to_ids(unpadded_tokens)
    # Mask marks every real (pre-padding) position; padding is added below.
    input_mask = [1] * len(input_ids)
    input_set = pad_features_with_feat_spec(
        input_ids=input_ids,
        input_mask=input_mask,
        unpadded_segment_ids=unpadded_segment_ids,
        feat_spec=feat_spec,
    )
    return input_set
def create_amr_input_set(
unpadded_concepts: List[List[str]],
unpadded_relation_ids: List[Tuple[int, int]],
unpadded_relation_labels: List[List[str]],
tokenizer,
feat_spec: FeaturizationSpec,
):
"""Create padded inputs for model.
Converts tokens to |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.