input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
in {("list", "dyn"), ("list", "var")}:
self.interpreter_error("Unhandled array type [%s], for now only [dyn] and [var] are supported" % (ty[1],))
if not isinstance(value, ArrayVariable):
self.interpreter_error("Type check failure, value should be an array: %r : %r" % (value, ty))
return
# Handle all scalar cases
if isinstance(value, ArrayVariable):
self.interpreter_error("Type check failure, value shouldn't be an array: %r : %r" % (value, ty))
elif ty == "dyn":
# Everything can be a dyn.
return
elif ty == "var":
if not value.IS_VAR:
self.interpreter_error("Type check failure, value should be a variable: %r : %r" % (value, ty))
elif ty == "const":
if value.LAYER < LAYER_CONST:
self.interpreter_error("Type check failure, value should be const: %r : %r" % (value, ty))
elif ty in concrete_mapping:
if (not isinstance(value, CompileTimeData)) or value.ty not in concrete_mapping[ty]:
self.interpreter_error("Type check failure, value should be %s: %r" % (ty, value))
else:
self.interpreter_error("Bug: Unimplemented type in check: %r : %r" % (value, ty))
def overlay_json(self, dest, source):
    """Recursively merge `source` into `dest` in place.

    Nested dicts present in both are merged; every other key from
    `source` overwrites (or adds to) the corresponding key in `dest`.
    """
    if not isinstance(dest, dict):
        self.interpreter_error("Bug: Overlay target isn't a dict? %r" % (dest,))
    if not isinstance(source, dict):
        self.interpreter_error("Overlay must be a dict, not: %r" % (source,))
    for key, new_value in source.items():
        existing = dest.get(key)
        if isinstance(existing, dict) and isinstance(new_value, dict):
            # Both sides are dicts: merge rather than replace.
            self.overlay_json(existing, new_value)
        else:
            dest[key] = new_value
def perform_compile_time_operator(self, op_name, args):
    """Apply operator `op_name` to already-unwrapped compile-time values.

    `+` and `-` accept one (unary) or two (binary) arguments; `!` is
    unary; every other operator is binary. The raw Python result is
    rewrapped as CompileTimeData, keyed by its Python type.
    """
    if op_name == "+":
        result = args[0] if len(args) == 1 else args[0] + args[1]
    elif op_name == "-":
        result = -args[0] if len(args) == 1 else args[0] - args[1]
    elif op_name == "*":
        result = args[0] * args[1]
    elif op_name == "/":
        result = args[0] / args[1]
    elif op_name == "%":
        result = args[0] % args[1]
    elif op_name == "^":
        result = args[0] ** args[1]
    elif op_name == "..":
        # Range operator: represented as a (lo, hi) tuple.
        result = args[0], args[1]
    elif op_name == "||":
        result = args[0] or args[1]
    elif op_name == "&&":
        # BUG FIX: previously used `or`, making && behave exactly like ||.
        result = args[0] and args[1]
    elif op_name == "<":
        result = args[0] < args[1]
    elif op_name == ">":
        result = args[0] > args[1]
    elif op_name == "<=":
        result = args[0] <= args[1]
    elif op_name == ">=":
        result = args[0] >= args[1]
    elif op_name == "==":
        result = args[0] == args[1]
    elif op_name == "!=":
        result = args[0] != args[1]
    elif op_name == "!":
        # BUG FIX: previously referenced undefined name `opname`,
        # which raised NameError whenever ! was constant-folded.
        result = not args[0]
    else:
        self.interpreter_error("Bug: Unimplemented compile-time operation: %r(%s)" % (
            op_name, ", ".join(str(arg) for arg in args)
        ))
    return CompileTimeData(type(result), result)
def evaluate_expr(self, scope, expr, purpose_name):
    """Evaluate one AST expression node.

    `expr` is a tagged tuple whose first element selects the node kind
    ("var", "prime", "dot", "compvar", "lit", "unary-op", "binary-op",
    "call", "known-value"). Returns a Datum, a variable object, or a
    CompileTimeData, depending on the node.
    """
    kind = expr[0]
    if kind == "var":
        _, var_name = expr
        # Check if the variable is exempt from namespacing due to always being global.
        if var_name not in self.always_global_variables:
            var_name = prefix_join(scope["@name_prefix"], var_name)
        # Check if this is an array variable.
        base_var_name, _ = split_variable_name(var_name)
        if base_var_name in self.array_variables:
            return ArrayVariable(var_name, self.array_variables[base_var_name].length)
        elif var_name in self.root_scope:
            value = self.root_scope[var_name]
            if not isinstance(value, Datum):
                # Functions get wrapped so they can flow through as values.
                assert isinstance(value, Function)
                value = CompileTimeData(type(value), value)
            return value
        self.register_variable(var_name)
        return self.all_variables[var_name]
    elif kind == "prime":
        # x' — the derivative of a state variable.
        _, var_expr = expr
        var = self.evaluate_expr(scope, var_expr, purpose_name)
        self.type_check_assert(var, "var")
        new_name = var.name + "'"
        self.register_variable(new_name)
        return self.all_variables[new_name]
    elif kind == "dot":
        # Right now we only support indexing global.
        _, lhs, name = expr
        if lhs != ("var", "global"):
            self.interpreter_error("For now global is the only thing that may appear on the left of a dot accessor")
        # Re-evaluate with an empty name prefix so the lookup is global.
        return self.evaluate_expr({"@name_prefix": ""}, ("var", name), "BUGBUGBUG")
    elif kind == "compvar":
        return self.lookup(scope, expr[1])
    elif kind == "lit":
        _, ty_name, value = expr
        return CompileTimeData({"int": int, "float": float, "str": str}[ty_name], value)
    elif kind in {"unary-op", "binary-op"}:
        op_name = expr[1]
        op_args = [self.evaluate_expr(scope, arg_expr, purpose_name) for arg_expr in expr[2:]]
        if op_name == "[]":
            # Array indexing resolves to a synthetic per-element variable.
            array_var, index = op_args
            if not isinstance(array_var, ArrayVariable):
                self.interpreter_error("Indexing must be done on an array variable, not: %r" % (array_var,))
            # BUG FIX: the original test was `not A and B`, which (by
            # precedence) only rejected non-comptime *integer* indices and
            # accepted e.g. comptime floats; the intent is to require a
            # compile-time integer, i.e. `not (A and B)`.
            if not (index.LAYER == LAYER_COMPTIME and index.ty == int):
                self.interpreter_error("Index into %r must be a compile-time integer, not: %r" % (array_var, index))
            if not (0 <= index.value < array_var.length):
                self.interpreter_error("Index %i out of range for %r" % (index.value, array_var))
            array_var_base, array_var_ticks = split_variable_name(array_var.name)
            magic_name = "%s[%i]%s" % (array_var_base, index.value, array_var_ticks)
            self.register_variable(magic_name)
            return self.all_variables[magic_name]
        # Do constant folding here.
        if all(arg.LAYER == LAYER_COMPTIME for arg in op_args):
            return self.perform_compile_time_operator(op_name, [arg.value for arg in op_args])
        return Expr(
            layer=min(arg.LAYER for arg in op_args),
            op=op_name,
            args=op_args,
        )
    elif kind == "call":
        _, fn_expr, arg_exprs, named_arg_exprs = expr
        if fn_expr[0] == "var":
            # Fast path: a call on a bare name is looked up directly.
            _, fn_name = fn_expr
            fn = self.lookup(scope, fn_name)
        else:
            # Otherwise evaluate the callee expression to a Function value.
            fn_obj = self.evaluate_expr(scope, fn_expr, purpose_name)
            self.type_check_assert(fn_obj, "Function")
            fn = fn_obj.value
            fn_name = fn.name
        if isinstance(fn, Datum):
            self.interpreter_error("%s isn't a function; do you want to drop the parens?" % fn_name)
        args = [self.evaluate_expr(scope, arg_expr, purpose_name) for arg_expr in arg_exprs]
        named_args = {
            name: self.evaluate_expr(scope, arg_expr, purpose_name)
            for name, arg_expr in named_arg_exprs.items()
        }
        # If fn.args is a number, then it's a minimum number of arguments, and there is no type-safety.
        if isinstance(fn.args, int):
            if len(args) < fn.args:
                self.interpreter_error("Function %s expected at least %i arguments, we passed %i" % (
                    fn.name, fn.args, len(args),
                ))
        elif len(fn.args) != len(args):
            # Make sure that our arguments line up.
            self.interpreter_error("Function %s expected %i arguments, we passed %i" % (
                fn.name, len(fn.args), len(args),
            ))
        # Check all the named arguments.
        optional_args_values = {}
        for name, val in named_args.items():
            if name not in fn.named_args:
                if not fn.named_args:
                    self.interpreter_error("Function %s doesn't take any optional arguments (%s was passed)" % (fn.name, name))
                self.interpreter_error("Function %s doesn't have an optional argument called %s (%s's optional arguments: %r)" % (
                    fn.name, name, fn.name, list(fn.named_args),
                ))
            self.type_check_assert(val, fn.named_args[name])
            optional_args_values[name] = val
        subscope = scope.copy()
        subscope["@purpose_name"] = purpose_name
        subscope["@optional_args"] = optional_args_values
        # Only update the name prefix if we're not immediately calling a builtin function.
        if fn.do_prefix_names:
            subscope["@name_prefix"] = prefix_join(scope["@name_prefix"], self.get_unique(fn_name))
        if isinstance(fn.args, int):
            # Varargs-style builtin: pass the raw argument list through.
            subscope["@args"] = args
        else:
            for arg, (arg_name, arg_ty_annot) in zip(args, fn.args):
                self.type_check_assert(arg, arg_ty_annot)
                subscope[arg_name] = arg
        # Perform the function call.
        if isinstance(fn.body, list):
            # User-defined function: interpret its statement list.
            return self.execute(subscope, fn.body)
        else:
            # Builtin: the body is a Python callable.
            return fn.body(self, subscope)
    elif kind == "known-value":
        # This path is just used internally by the compiler, and is not generated in any of our ASTs.
        return expr[1]
    self.interpreter_error("Bug! Unhandled expr: %r" % (expr,))
def evaluate_unit_expr(self, unit_expr):
    """Resolve a unit expression AST node to a unit object.

    Only bare unit names are supported; anything else is an error.
    """
    if unit_expr[0] == "var":
        return self.unit_system.parse_unit_name(unit_expr[1])
    # NOTE(review): compound units ("binary-op" nodes, e.g. m/s) appear
    # unfinished upstream; they fall through to the error below.
    self.interpreter_error("Invalid operation in unit expression: %r" % (unit_expr,))
def register_variable(self, name):
    """Ensure a StateVariable called `name` exists, creating it on demand.

    Creating a primed variable (x') also registers its base (x).
    Returns the variable object.
    """
    variable = self.all_variables.get(name)
    if variable is None:
        variable = StateVariable(name)
        self.all_variables[name] = variable
        if name.endswith("'"):
            # A derivative implies the underlying variable exists too.
            self.register_variable(name[:-1])
    return variable
def make_realized_process(self, name, args):
    """Record a (name, args) process instantiation and return its index."""
    self.realized_processes.append((name, args))
    return len(self.realized_processes) - 1
def obliterate_variable(self, base_name):
    """Delete `base_name` and all of its primed derivatives (x, x', x'', ...).

    Also drops any drivers and initializers registered for those names.
    Stops at the first order that isn't registered.
    """
    order = 0
    while True:
        name = base_name + "'" * order
        if name not in self.all_variables:
            return
        del self.all_variables[name]
        # pop with a default so missing entries are ignored.
        self.variable_drivers.pop(name, None)
        self.variable_initializers.pop(name, None)
        order += 1
def get_compilation_parameter(self, desired_name, default_value):
    """Reserve a collision-free parameter name and resolve its value.

    Returns (final_name, value) where value comes from the external
    compilation parameters when present, else `default_value`.
    """
    # First, determine the final name that we're giving to this request.
    final_name = dict_insert_no_collision(
        self.compilation_parameters, desired_name, lambda chosen_name: chosen_name)
    # If we have an external definition for this value then use it.
    value = self.external_compilation_parameters.get(final_name, default_value)
    return final_name, value
def make_adjustable_parameter(self, desired_name, default_value, name_must_be_exact=True):
    """Create an AdjustableParameter under `desired_name`.

    If the name is taken: error when `name_must_be_exact`, otherwise
    allocate a non-colliding name instead.
    """
    if desired_name in self.adjustable_parameters:
        if name_must_be_exact:
            self.interpreter_error("Attempt to redefine parameter: %s" % desired_name)
        # Name collision allowed: pick a fresh suffixed name.
        return dict_insert_no_collision(
            self.adjustable_parameters,
            desired_name,
            lambda final_name: AdjustableParameter(final_name, default_value),
        )
    parameter = AdjustableParameter(desired_name, default_value)
    self.adjustable_parameters[desired_name] = parameter
    return parameter
def compile_time_assign(self, scope, name, value, reassign=False):
    """Bind `name` to `value` in `scope`; redefinition requires `reassign`."""
    if not reassign and name in scope:
        self.interpreter_error("Redefinition of %s" % (name,))
    scope[name] = value
def lookup(self, scope, name):
    """Fetch `name` from `scope`, reporting an interpreter error when absent."""
    missing = name not in scope
    if missing:
        self.interpreter_error("Undefined name: %s" % (name,))
    return scope[name]
def set_initializer(self, lhs, rhs):
    """Record `rhs` as the initial value of state variable `lhs` (the ~ operator).

    The RHS must be at least const-layer; a variable may only be
    initialized once.
    """
    if not lhs.IS_VAR:
        self.interpreter_error("LHS of ~ must be a var, not: %r" % (lhs,))
    if rhs.LAYER < LAYER_CONST:
        self.interpreter_error("RHS of ~ must be constant.")
    # We add a prime here because if a variable is initialized then it's dynamic, and thus has a derivative.
    self.register_variable(lhs.name + "'")
    if lhs.name in self.variable_initializers:
        # TODO: Get both line numbers.
        self.interpreter_error("Attempt to double-initialize %s" % lhs.name)
    self.variable_initializers[lhs.name] = rhs
def set_driver(self, lhs, rhs):
if not lhs.IS_VAR:
self.interpreter_error("LHS of <- must be a var, | |
<gh_stars>1-10
from comet_ml import OfflineExperiment # needed at top for Comet plugin
from collections import defaultdict, OrderedDict
import torch
import torch.nn as nn
import tqdm
import time
from sklearn.metrics import f1_score, precision_score, recall_score
import torch.nn.functional as F
from utils import *
import configparser
LABEL_MAPPING = {0: 'hateful', 1: 'abusive', 2: 'normal', 3: 'spam'}
DEBUG = False
config = configparser.ConfigParser()
config.read(os.path.join(ROOT_DIR, 'config.ini'))
class ExperimentBuilder(nn.Module):
def __init__(self, network_model, device, hyper_params, data_map,
             train_data, valid_data, test_data, experiment_flag,
             data_provider, experiment):
    """
    Initializes an ExperimentBuilder object. Such an object takes care of running training and evaluation of a deep net
    on a given dataset. It also takes care of saving per epoch models and automatically inferring the best val model
    to be used for evaluating the test set metrics.
    """
    super(ExperimentBuilder, self).__init__()
    self.experiment = experiment  # comet experiment
    # Experiment flag selects which input/fusion pipeline is used (see forward_pass_helper).
    self.experiment_flag = experiment_flag
    self.experiment_name = hyper_params['experiment_name']
    self.model = network_model
    # Re-initialize network parameters for a fresh run.
    self.model.reset_parameters()
    self.device = device
    self.seed = hyper_params['seed']
    self.num_epochs = hyper_params['num_epochs']
    self.starting_epoch = 0
    self.state = dict()
    self.data_provider = data_provider
    self.data_map = data_map
    # Raw (unbatched) splits; preprocess_data() fills the processed lists below.
    self.train_data_raw = train_data
    self.valid_data_raw = valid_data
    self.test_data_raw = test_data
    self.train_data = []
    self.valid_data = []
    self.test_data = []
    self.train_data_tweets = None
    self.valid_data_tweets = None
    self.test_data_tweets = None
    self.confusion_matrix = torch.zeros(4, 4)  # number of classes
    # Build the extra layers of the model from a data sample.
    self.preprocess_data()
    self.criterion = nn.CrossEntropyLoss().to(self.device)  # send the loss computation to the GPU
    self.optimizer = torch.optim.Adam(self.model.parameters(), weight_decay=1e-4, lr=hyper_params["learning_rate"])
    # self.scheduler = optim.lr_scheduler.CosineAnnealingLR(self.optimizer, T_max=self.num_epochs, eta_min=1e-4)
    self.scheduler = None
    # Generate the directory names.
    self.experiment_folder = hyper_params['results_dir']
    self.experiment_saved_models = os.path.abspath(os.path.join(self.experiment_folder, "saved_models"))
    # Set best models to be at 0 since we are just starting.
    self.best_val_model_idx = 0
    self.best_val_model_criteria = 0.
    if not os.path.exists(self.experiment_folder):  # If experiment directory does not exist
        os.makedirs(self.experiment_folder)  # create the experiment directory
    if not os.path.exists(self.experiment_saved_models):
        os.makedirs(self.experiment_saved_models)  # create the experiment saved models directory
def get_num_parameters(self):
    """Return the total number of scalar parameters in this module."""
    return sum(np.prod(parameter.shape) for parameter in self.parameters())
def forward_pass_helper(self, x):
    """Run one forward pass, routing inputs per self.experiment_flag.

    x is a pair: x[0] is the embedded-tweet batch, x[1] is the auxiliary
    feature input (its meaning depends on the experiment flag).
    Returns the logits from the final 'fc_layer'.
    """
    tweet_input, feature_input = x[0], x[1]
    if self.experiment_flag == 2 or self.experiment_flag == 4:
        # Two-tower path: run tweet and feature inputs through separate
        # sub-networks, then concatenate before the classifier head.
        feature_input = feature_input.to(self.device)
        tweet_input = tweet_input.to(self.device)
        feature_out = self.model.forward(feature_input, layer_key='feature', flatten_flag=True)
        tweet_out = self.model.forward(tweet_input, layer_key='tweet', flatten_flag=True)
        # Concatenation is done on CPU, then moved back to the device.
        out = torch.cat((tweet_out.cpu(), feature_out.cpu()), 1).to(self.device)
        return self.model.layer_dict['fc_layer'](out)
    elif self.experiment_flag == 5:
        # Tweet + user-timeline path: each timeline tweet is encoded and
        # the encodings are summed into a single feature vector.
        tweet_input = tweet_input.to(self.device)
        tweet_out = self.model.forward(tweet_input, layer_key='tweet', flatten_flag=True)
        # NOTE(review): np.sum over a list of torch tensors returns a
        # single summed object; the subsequent .cpu() call assumes the
        # result is still a tensor — verify this path actually runs.
        feature_out = np.sum([self.model.forward(item.to(self.device), layer_key='user-timeline').cpu() for item in feature_input])
        out = torch.cat((tweet_out.cpu(), feature_out.cpu()), 1).to(self.device)
        return self.model.layer_dict['fc_layer'](out)
    else:  # experiments 1 & 3
        tweet_input = tweet_input.to(self.device)
        out = self.model.forward(tweet_input, flatten_flag=True, layer_key='tweet')  # forward the data in the model
        return self.model.layer_dict['fc_layer'](out)
def run_train_iter(self, x, y, stats, experiment_key='train'):
    """
    Receives the inputs and targets for the model and runs a training iteration. Returns loss and accuracy metrics.
    :param x: The inputs to the model. A numpy array of shape batch_size, channels, height, width
    :param y: The targets for the model. A numpy array of shape batch_size, num_classes
    :param stats: dict of lists; per-batch metrics are appended under
        '<experiment_key>_acc', '<experiment_key>_loss', and the F-metric keys.
    :param experiment_key: prefix used for the stats keys (default 'train').
    :return: None; results are accumulated in `stats`.
    """
    # sets model to training mode
    # (in case batch normalization or other methods have different procedures for training and evaluation)
    self.train()
    self.optimizer.zero_grad()  # set all weight grads from previous training iters to 0
    y = y.to(self.device)
    out = self.forward_pass_helper(x)  # forward the data in the model
    loss = self.criterion(out, y)
    loss.backward()  # backpropagate to compute gradients for current iter loss
    self.optimizer.step()  # update network parameters
    _, predicted = torch.max(out.data, 1)  # get argmax of predictions
    accuracy = np.mean(list(predicted.eq(y.data).cpu()))  # compute accuracy
    stats['{}_acc'.format(experiment_key)].append(accuracy)
    stats['{}_loss'.format(experiment_key)].append(loss.data.detach().cpu().numpy())
    # Also record F1/precision/recall (overall and per class).
    self.compute_f_metrics(stats, y, predicted, experiment_key)
def run_evaluation_iter(self, x, y, stats, experiment_key='valid'):
    """
    Receives the inputs and targets for the model and runs an evaluation iterations. Returns loss and accuracy metrics.
    :param x: The inputs to the model. A numpy array of shape batch_size, channels, height, width
    :param y: The targets for the model. A numpy array of shape batch_size, num_classes
    :param stats: dict of lists; per-batch metrics are appended under keys
        prefixed with `experiment_key`.
    :param experiment_key: 'valid' or 'test'; 'test' additionally updates
        self.confusion_matrix.
    :return: the predicted class indices for this batch
    """
    self.eval()  # sets the system to validation mode
    y = y.to(self.device)
    out = self.forward_pass_helper(x)
    loss = self.criterion(out, y)
    _, predicted = torch.max(out.data, 1)  # get argmax of predictions
    accuracy = np.mean(list(predicted.eq(y.data).cpu()))
    if experiment_key == 'test':
        # Accumulate the (true, predicted) counts into the confusion matrix.
        for t, p in zip(y.data.view(-1), predicted.cpu().view(-1)):
            self.confusion_matrix[t.long(), p.long()] += 1
    stats['{}_acc'.format(experiment_key)].append(accuracy)  # compute accuracy
    stats['{}_loss'.format(experiment_key)].append(loss.data.detach().cpu().numpy())
    self.compute_f_metrics(stats, y, predicted, experiment_key)
    return predicted
def save_model(self, model_save_dir, model_save_name, model_idx):
    """
    Save the network parameter state for one epoch.
    :param model_save_dir: The directory to store the state in.
    :param model_save_name: Name to use to save model without the epoch index.
    :param model_idx: The epoch index appended to the saved file name.
    """
    checkpoint_path = os.path.join(
        model_save_dir, "{}_{}.pt".format(model_save_name, str(model_idx)))
    torch.save(self.state_dict(), f=checkpoint_path)
def load_model(self, model_save_dir, model_save_name, model_idx):
    """
    Load the network parameter state saved for a given epoch.
    :param model_save_dir: The directory the state was stored in.
    :param model_save_name: Name used to save the model without the epoch index.
    :param model_idx: The epoch index of the checkpoint to load.
    """
    checkpoint_path = os.path.join(
        model_save_dir, "{}_{}.pt".format(model_save_name, str(model_idx)))
    self.load_state_dict(torch.load(f=checkpoint_path))
def remove_excess_models(self):
    """Delete every saved checkpoint except the best-validation epoch.

    Walks the experiment folder for a 'saved_models' directory and removes
    any '<name>_<epoch>.pt' file whose epoch differs from
    self.best_val_model_idx.
    """
    dir_list_list = [dir_names for (_, dir_names, _) in os.walk(self.experiment_folder)]
    for dir_list in dir_list_list:
        if 'saved_models' in dir_list:
            path = os.path.abspath(os.path.join(self.experiment_folder, 'saved_models'))
            file_list_list = [file_names for (_, _, file_names) in os.walk(path)]
            for file_list in file_list_list:
                for file in file_list:
                    # File names look like '<save_name>_<epoch>.pt'; recover the epoch.
                    epoch = file.split('_')[-1]
                    epoch = epoch.replace('.pt', '')
                    if int(epoch) != self.best_val_model_idx:
                        os.remove(os.path.join(path, file))
@staticmethod
def compute_f_metrics(stats, y_true, predicted, type_key):
    """Append weighted and per-class F1/precision/recall to `stats`.

    :param stats: dict of lists keyed by '<type_key>_<metric>[_<label>]'.
    :param y_true: tensor of true class indices.
    :param predicted: tensor of predicted class indices.
    :param type_key: stats key prefix, e.g. 'train'/'valid'/'test'.
    """
    # Convert to numpy ONCE; the original re-ran cpu().detach().numpy()
    # for every one of the six metric calls.
    y_np = y_true.cpu().detach().numpy()
    pred_np = predicted.cpu().detach().numpy()
    stats[type_key + '_f_score'].append(
        f1_score(y_np, pred_np, average='weighted'))
    stats[type_key + '_precision'].append(
        precision_score(y_np, pred_np, average='weighted'))
    stats[type_key + '_recall'].append(
        recall_score(y_np, pred_np, average='weighted'))
    # Per-class scores, mapped back to human-readable label names.
    f1scores = f1_score(y_np, pred_np, average=None)
    precision = precision_score(y_np, pred_np, average=None)
    recall = recall_score(y_np, pred_np, average=None)
    for i in range(len(f1scores)):
        stats[type_key + '_f_score_' + LABEL_MAPPING[i]].append(f1scores[i])
        stats[type_key + '_precision_' + LABEL_MAPPING[i]].append(precision[i])
        stats[type_key + '_recall_' + LABEL_MAPPING[i]].append(recall[i])
def save_best_performing_model(self, epoch_stats, epoch_idx):
    """Track the epoch with the best validation F1 on the 'hateful' class."""
    candidate = epoch_stats['valid_f_score_hateful']
    if candidate > self.best_val_model_criteria:
        # New best: remember both the score and which epoch achieved it.
        self.best_val_model_criteria = candidate
        self.best_val_model_idx = epoch_idx
@staticmethod
def iter_logs(stats, start_time, index):
# Log results to terminal
out_string = "".join(["{}: {:0.4f}\n".format(key, value)
for key, value in stats.items() if key != 'epoch'])
epoch_elapsed_time = (time.time() - start_time) / 60 # calculate time taken for epoch
epoch_elapsed_time = "{:.4f}".format(epoch_elapsed_time)
print("\n===Epoch {}===\n{}===Elapsed time: {} mins===".format(index, out_string, epoch_elapsed_time))
def extract_sample_data(self, sample_ids):
    """Assemble model inputs for a batch of sample ids from self.data_map.

    Returns a pair whose second element depends on self.experiment_flag:
    context tweets (2), topic words (4), user-timeline tensors (5), or the
    tweets themselves (1/3).
    """
    embedded_tweets = []
    embedded_context_tweets = []
    embedded_topic_words = []
    embedded_timeline_list = []
    for _id in sample_ids:
        embedded_tweet = self.data_map[_id]['embedded_tweet']
        if self.experiment_flag == 2:
            # concatenates retweet/favorite to tweet
            retweet_count, favorite_count = self.data_map[_id]['retweet_count'], self.data_map[_id]['favorite_count']
            features = torch.Tensor([[retweet_count, favorite_count] for _ in range(np.array(embedded_tweet).shape[0])])
            embedded_tweet = np.concatenate((embedded_tweet, features), -1)
            # adds context tweet
            embedded_context_tweets.append(self.data_map[_id]['embedded_context_tweet'])
        if self.experiment_flag == 3:
            # concatenates retweet/favorite to tweet
            retweet_count, favorite_count = self.data_map[_id]['retweet_count'], self.data_map[_id]['favorite_count']
            features = torch.Tensor([[retweet_count, favorite_count] for _ in range(np.array(embedded_tweet).shape[0])])
            embedded_tweet = np.concatenate((embedded_tweet, features), -1)
        if self.experiment_flag == 4:
            embedded_topic_words.append(self.data_map[_id]['embedded_topic_words'])
        if self.experiment_flag == 5:
            # NOTE(review): this reassigns (not appends) on every id, so only
            # the LAST sample's timeline survives the loop — confirm intended.
            embedded_timeline_list = [torch.Tensor(tweet).float()
                                      for tweet in self.data_map[_id]['embedded_user_timeline']]
        # append main tweet
        embedded_tweets.append(embedded_tweet)
    # get all tweets as corpus and do LDA from that
    if self.experiment_flag == 4:
        return torch.Tensor(embedded_tweets).float(), torch.Tensor(embedded_topic_words).float()
    if self.experiment_flag == 2:
        return torch.Tensor(embedded_tweets).float(), torch.Tensor(embedded_context_tweets).float()
    elif self.experiment_flag == 1 or self.experiment_flag == 3:  # experiments 1 and 3
        return torch.Tensor(embedded_tweets).float(), torch.Tensor(embedded_tweets).float()
    elif self.experiment_flag == 5:
        return torch.Tensor(embedded_tweets).float(), embedded_timeline_list
    # NOTE(review): any other flag value falls through and returns None implicitly.
@staticmethod
def flatten_embedding(out):
"""
Flattens tweet embedding to have dim (batch_size, 1, embed_dim)
:param out:
:return:
"""
out = F.max_pool1d(out, out.shape[-1])
out = out.permute([0, 2, 1])
return out
def build_model(self, data_sample):
# build model
embedded_tweet, features_tweet = data_sample # first element, tuple, first value in tuple
self.model.build_layers(embedded_tweet.shape, 'tweet')
embedded_tweet_out = self.model.forward(torch.zeros(embedded_tweet.shape), layer_key='tweet')
if self.experiment_flag == 1 or self.experiment_flag == 3:
out | |
<reponame>YosephKS/qiskit-terra
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test cases for the pulse scheduler passes."""
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, schedule
from qiskit.circuit import Gate, Parameter
from qiskit.circuit.library import U1Gate, U2Gate, U3Gate
from qiskit.exceptions import QiskitError
from qiskit.pulse import (Schedule, DriveChannel, AcquireChannel, Acquire,
MeasureChannel, MemorySlot, Gaussian, Play)
from qiskit.pulse import build, macros
from qiskit.test.mock import FakeBackend, FakeOpenPulse2Q, FakeOpenPulse3Q
from qiskit.test import QiskitTestCase
class TestBasicSchedule(QiskitTestCase):
"""Scheduling tests."""
def setUp(self):
    """Create a two-qubit pulse backend and cache its instruction map."""
    super().setUp()
    backend = FakeOpenPulse2Q()
    self.backend = backend
    self.inst_map = backend.defaults().instruction_schedule_map
def test_unavailable_defaults(self):
    """Test backend with unavailable defaults."""
    qr = QuantumRegister(1)
    qc = QuantumCircuit(qr)
    backend = FakeBackend(None)
    # Make defaults() return a configuration instead of real defaults.
    backend.defaults = backend.configuration
    with self.assertRaises(QiskitError):
        schedule(qc, backend)
def test_alap_pass(self):
    """Test ALAP scheduling."""
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2)
    qc = QuantumCircuit(qr, cr)
    qc.append(U2Gate(3.14, 1.57), [qr[0]])
    qc.append(U2Gate(0.5, 0.25), [qr[1]])
    qc.barrier(qr[1])
    qc.append(U2Gate(0.5, 0.25), [qr[1]])
    qc.barrier(qr[0], [qr[1]])
    qc.cx(qr[0], qr[1])
    qc.measure(qr, cr)
    sched = schedule(qc, self.backend)
    # X pulse on q0 should end at the start of the CNOT
    reference = Schedule(
        (28, self.inst_map.get('u2', [0], 3.14, 1.57)),
        self.inst_map.get('u2', [1], 0.5, 0.25),
        (28, self.inst_map.get('u2', [1], 0.5, 0.25)),
        (56, self.inst_map.get('cx', [0, 1])),
        (78, self.inst_map.get('measure', [0, 1])))
    for (actual_t, actual_inst), (ref_t, ref_inst) in zip(sched.instructions, reference.instructions):
        self.assertEqual(actual_t, ref_t)
        self.assertEqual(actual_inst, ref_inst)
def test_alap_with_barriers(self):
    """Test that ALAP respects barriers on new qubits."""
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2)
    qc = QuantumCircuit(qr, cr)
    qc.append(U2Gate(0, 0), [qr[0]])
    qc.barrier(qr[0], qr[1])
    qc.append(U2Gate(0, 0), [qr[1]])
    sched = schedule(qc, self.backend, method='alap')
    # The barrier forces q1's gate to start after q0's gate finishes.
    reference = Schedule(
        self.inst_map.get('u2', [0], 0, 0),
        (28, self.inst_map.get('u2', [1], 0, 0)))
    for (actual_t, actual_inst), (ref_t, ref_inst) in zip(sched.instructions, reference.instructions):
        self.assertEqual(actual_t, ref_t)
        self.assertEqual(actual_inst, ref_inst)
def test_empty_circuit_schedule(self):
    """Test empty circuit being scheduled."""
    qc = QuantumCircuit(QuantumRegister(2), ClassicalRegister(2))
    sched = schedule(qc, self.backend, method='alap')
    # An empty circuit must produce an empty schedule.
    self.assertEqual(sched.instructions, Schedule().instructions)
def test_alap_aligns_end(self):
    """Test that ALAP always acts as though there is a final global barrier."""
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2)
    qc = QuantumCircuit(qr, cr)
    qc.append(U3Gate(0, 0, 0), [qr[0]])
    qc.append(U2Gate(0, 0), [qr[1]])
    sched = schedule(qc, self.backend, method='alap')
    expected_sched = Schedule(
        self.inst_map.get('u2', [1], 0, 0),
        (26, self.inst_map.get('u3', [0], 0, 0, 0)))
    for (actual_t, actual_inst), (ref_t, ref_inst) in zip(sched.instructions, expected_sched.instructions):
        self.assertEqual(actual_t, ref_t)
        self.assertEqual(actual_inst, ref_inst)
    # Both drive channels end together, as if a final barrier existed.
    self.assertEqual(sched.ch_duration(DriveChannel(0)),
                     expected_sched.ch_duration(DriveChannel(1)))
def test_asap_pass(self):
    """Test ASAP scheduling."""
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2)
    qc = QuantumCircuit(qr, cr)
    qc.append(U2Gate(3.14, 1.57), [qr[0]])
    qc.append(U2Gate(0.5, 0.25), [qr[1]])
    qc.barrier(qr[1])
    qc.append(U2Gate(0.5, 0.25), [qr[1]])
    qc.barrier(qr[0], qr[1])
    qc.cx(qr[0], qr[1])
    qc.measure(qr, cr)
    sched = schedule(qc, self.backend, method="as_soon_as_possible")
    # X pulse on q0 should start at t=0
    reference = Schedule(
        self.inst_map.get('u2', [0], 3.14, 1.57),
        self.inst_map.get('u2', [1], 0.5, 0.25),
        (28, self.inst_map.get('u2', [1], 0.5, 0.25)),
        (56, self.inst_map.get('cx', [0, 1])),
        (78, self.inst_map.get('measure', [0, 1])))
    for (actual_t, actual_inst), (ref_t, ref_inst) in zip(sched.instructions, reference.instructions):
        self.assertEqual(actual_t, ref_t)
        self.assertEqual(actual_inst, ref_inst)
def test_alap_resource_respecting(self):
    """Test that the ALAP pass properly respects busy resources when backwards scheduling.
    For instance, a CX on 0 and 1 followed by an X on only 1 must respect both qubits'
    timeline."""
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2)
    qc = QuantumCircuit(qr, cr)
    qc.cx(qr[0], qr[1])
    qc.append(U2Gate(0.5, 0.25), [qr[1]])
    insts = schedule(qc, self.backend, method="as_late_as_possible").instructions
    self.assertEqual(insts[0][0], 0)
    self.assertEqual(insts[4][0], 22)
    # Same circuit plus a measurement: the final instruction shifts to t=50.
    qc = QuantumCircuit(qr, cr)
    qc.cx(qr[0], qr[1])
    qc.append(U2Gate(0.5, 0.25), [qr[1]])
    qc.measure(qr, cr)
    sched = schedule(qc, self.backend, method="as_late_as_possible")
    self.assertEqual(sched.instructions[-1][0], 50)
def test_inst_map_schedules_unaltered(self):
    """Test that forward scheduling doesn't change relative timing with a command."""
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2)
    qc = QuantumCircuit(qr, cr)
    qc.cx(qr[0], qr[1])
    sched_asap = schedule(qc, self.backend, method="as_soon_as_possible")
    sched_alap = schedule(qc, self.backend, method="as_late_as_possible")
    # A single CX must be timed identically by both methods.
    for asap, alap in zip(sched_asap.instructions, sched_alap.instructions):
        self.assertEqual(asap[0], alap[0])
        self.assertEqual(asap[1], alap[1])
    insts = sched_asap.instructions
    for idx, expected_time in enumerate([0, 10, 20, 20]):
        self.assertEqual(insts[idx][0], expected_time)
def test_measure_combined(self):
    """
    Test to check for measure on the same qubit which generated another measure schedule.
    The measures on different qubits are combined, but measures on the same qubit
    adds another measure to the schedule.
    """
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2)
    qc = QuantumCircuit(qr, cr)
    qc.append(U2Gate(3.14, 1.57), [qr[0]])
    qc.cx(qr[0], qr[1])
    qc.measure(qr[0], cr[0])
    qc.measure(qr[1], cr[1])
    # Second measure of q1 must be scheduled as a new measurement.
    qc.measure(qr[1], cr[1])
    sched = schedule(qc, self.backend, method="as_soon_as_possible")
    reference = Schedule(
        self.inst_map.get('u2', [0], 3.14, 1.57),
        (28, self.inst_map.get('cx', [0, 1])),
        (50, self.inst_map.get('measure', [0, 1])),
        (60, self.inst_map.get('measure', [0, 1]).filter(channels=[MeasureChannel(1)])),
        (60, Acquire(10, AcquireChannel(0), MemorySlot(0))),
        (60, Acquire(10, AcquireChannel(1), MemorySlot(1))))
    self.assertEqual(sched.instructions, reference.instructions)
def test_3q_schedule(self):
    """Test a schedule that was recommended by <NAME> :D """
    backend = FakeOpenPulse3Q()
    inst_map = backend.defaults().instruction_schedule_map
    q = QuantumRegister(3)
    c = ClassicalRegister(3)
    qc = QuantumCircuit(q, c)
    qc.cx(q[0], q[1])
    qc.append(U2Gate(0.778, 0.122), [q[2]])
    qc.append(U3Gate(3.14, 1.57, 0), [q[0]])
    qc.append(U2Gate(3.14, 1.57), [q[1]])
    qc.cx(q[1], q[2])
    qc.append(U2Gate(0.778, 0.122), [q[2]])
    sched = schedule(qc, backend)
    expected = Schedule(
        inst_map.get('cx', [0, 1]),
        (22, inst_map.get('u2', [1], 3.14, 1.57)),
        (46, inst_map.get('u2', [2], 0.778, 0.122)),
        (50, inst_map.get('cx', [1, 2])),
        (72, inst_map.get('u2', [2], 0.778, 0.122)),
        # BUG FIX: u3 takes three parameters; the circuit appends
        # U3Gate(3.14, 1.57, 0) but the expected schedule only bound two,
        # leaving the third parameter unbound in inst_map.get().
        (74, inst_map.get('u3', [0], 3.14, 1.57, 0)))
    for actual, expected in zip(sched.instructions, expected.instructions):
        self.assertEqual(actual[0], expected[0])
        self.assertEqual(actual[1], expected[1])
def test_schedule_multi(self):
    """Test scheduling multiple circuits at once."""
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2)
    qc0 = QuantumCircuit(qr, cr)
    qc0.cx(qr[0], qr[1])
    qc1 = QuantumCircuit(qr, cr)
    qc1.cx(qr[0], qr[1])
    # Scheduling a list must match scheduling each circuit individually.
    schedules = schedule([qc0, qc1], self.backend)
    expected_insts = schedule(qc0, self.backend).instructions
    for (actual_t, actual_inst), (exp_t, exp_inst) in zip(schedules[0].instructions, expected_insts):
        self.assertEqual(actual_t, exp_t)
        self.assertEqual(actual_inst, exp_inst)
def test_circuit_name_kept(self):
    """Test that the new schedule gets its name from the circuit."""
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2)
    qc = QuantumCircuit(qr, cr, name='CIRCNAME')
    qc.cx(qr[0], qr[1])
    # Both scheduling methods must propagate the circuit name.
    for method in ("asap", "alap"):
        sched = schedule(qc, self.backend, method=method)
        self.assertEqual(sched.name, qc.name)
def test_can_add_gates_into_free_space(self):
    """The scheduler does some time bookkeeping to know when qubits are free to be
    scheduled. Make sure this works for qubits that are used in the future. This was
    a bug, uncovered by this example:

        q0 = - - - - |X|
        q1 = |X| |u2| |X|

    In ALAP scheduling, the next operation on qubit 0 would be added at t=0 rather
    than immediately before the X gate.
    """
    qr = QuantumRegister(2)
    qc = QuantumCircuit(qr)
    for i in range(2):
        qc.append(U2Gate(0, 0), [qr[i]])
        qc.append(U1Gate(3.14), [qr[i]])
        qc.append(U2Gate(0, 0), [qr[i]])
    sched = schedule(qc, self.backend, method="alap")
    expected = Schedule(
        self.inst_map.get('u2', [0], 0, 0),
        self.inst_map.get('u2', [1], 0, 0),
        (28, self.inst_map.get('u1', [0], 3.14)),
        (28, self.inst_map.get('u1', [1], 3.14)),
        (28, self.inst_map.get('u2', [0], 0, 0)),
        (28, self.inst_map.get('u2', [1], 0, 0)))
    # zip() silently truncates to the shorter list, which would hide
    # missing instructions -- assert equal lengths first.
    self.assertEqual(len(sched.instructions), len(expected.instructions))
    for actual, wanted in zip(sched.instructions, expected.instructions):
        self.assertEqual(actual[0], wanted[0])
        self.assertEqual(actual[1], wanted[1])
def test_barriers_in_middle(self):
    """As a follow on to `test_can_add_gates_into_free_space`, similar issues
    arose for barriers, specifically.
    """
    qr = QuantumRegister(2)
    qc = QuantumCircuit(qr)
    for i in range(2):
        qc.append(U2Gate(0, 0), [qr[i]])
        qc.barrier(qr[i])
        qc.append(U1Gate(3.14), [qr[i]])
        qc.barrier(qr[i])
        qc.append(U2Gate(0, 0), [qr[i]])
    sched = schedule(qc, self.backend, method="alap")
    expected = Schedule(
        self.inst_map.get('u2', [0], 0, 0),
        self.inst_map.get('u2', [1], 0, 0),
        (28, self.inst_map.get('u1', [0], 3.14)),
        (28, self.inst_map.get('u1', [1], 3.14)),
        (28, self.inst_map.get('u2', [0], 0, 0)),
        (28, self.inst_map.get('u2', [1], 0, 0)))
    # zip() silently truncates to the shorter list, which would hide
    # missing instructions -- assert equal lengths first.
    self.assertEqual(len(sched.instructions), len(expected.instructions))
    for actual, wanted in zip(sched.instructions, expected.instructions):
        self.assertEqual(actual[0], wanted[0])
        self.assertEqual(actual[1], wanted[1])
def test_parametric_input(self):
    """Scheduling should accept calibrations built from parametric pulses."""
    qubit_reg = QuantumRegister(1)
    circuit = QuantumCircuit(qubit_reg)
    circuit.append(Gate('gauss', 1, []), qargs=[qubit_reg[0]])
    # Register a custom Gaussian play as the calibration for 'gauss'.
    gauss_play = Play(Gaussian(duration=25, sigma=4, amp=0.5j), DriveChannel(0))
    custom_gauss = Schedule(gauss_play)
    self.inst_map.add('gauss', [0], custom_gauss)
    sched = schedule(circuit, self.backend, inst_map=self.inst_map)
    self.assertEqual(sched.instructions[0], custom_gauss.instructions[0])
def test_pulse_gates(self):
    """Circuit-attached pulse calibrations should drive the scheduler."""
    qreg = QuantumRegister(2)
    circ = QuantumCircuit(qreg)
    circ.append(U2Gate(0, 0), [qreg[0]])
    circ.barrier(qreg[0], qreg[1])
    circ.append(U2Gate(0, 0), [qreg[1]])
    # Attach an identical Gaussian calibration for u2 on each qubit.
    for qubit in (0, 1):
        circ.add_calibration(
            'u2', [qubit],
            Schedule(Play(Gaussian(28, 0.2, 4), DriveChannel(qubit))),
            [0, 0])
    sched = schedule(circ, self.backend)
    expected = Schedule(
        Play(Gaussian(28, 0.2, 4), DriveChannel(0)),
        (28, Schedule(Play(Gaussian(28, 0.2, 4), DriveChannel(1)))))
    self.assertEqual(sched.instructions, expected.instructions)
def test_calibrated_measurements(self):
    """A user-supplied measurement calibration replaces the default one."""
    qreg = QuantumRegister(2)
    creg = ClassicalRegister(2)
    circ = QuantumCircuit(qreg, creg)
    circ.append(U2Gate(0, 0), [qreg[0]])
    circ.measure(qreg[0], creg[0])
    # Custom measurement: a Gaussian stimulus overlapped with an acquire.
    meas_sched = Play(Gaussian(1200, 0.2, 4), MeasureChannel(0))
    meas_sched |= Acquire(1200, AcquireChannel(0), MemorySlot(0))
    circ.add_calibration('measure', [0], meas_sched)
    sched = schedule(circ, self.backend)
    expected = Schedule(
        self.inst_map.get('u2', [0], 0, 0),
        (28, meas_sched))
    self.assertEqual(sched.instructions, expected.instructions)
def test_subset_calibrated_measurements(self):
"""Test that measurement calibrations can be added and used for some qubits, even
if the other qubits do not also have calibrated measurements."""
qc = QuantumCircuit(3, 3)
qc.measure(0, 0)
qc.measure(1, 1)
qc.measure(2, 2)
meas_scheds = []
for qubit in [0, 2]:
meas = (Play(Gaussian(1200, 0.2, 4), MeasureChannel(qubit))
+ Acquire(1200, AcquireChannel(qubit), MemorySlot(qubit)))
meas_scheds.append(meas)
qc.add_calibration('measure', [qubit], meas)
meas = macros.measure([1], FakeOpenPulse3Q())
meas = meas.exclude(channels=[AcquireChannel(0), | |
+ 1
# sp = sorted(counts.items(), key = lambda i:-i[1])
# for j in range(10):
# print("E (KeV): {:>8.3f} - count {:>8}".format(*sp[j]))
# plt.subplot(1,3,ich+1)
# plt.hist(this_e, histtype='step', bins=np.linspace(59.3,60,1000), label="ch {}".format(ch))
# plt.legend()
# plt.xlabel("E (KeV)")
# plt.suptitle("{}, simulated energy deposits (KeV)".format(src))
# plt.show()
# sys.exit(0)
# # test getting gamma ray track info
# # </TEMP>
# make transformer instances
for ch in self.ch_comp:
this_sim_e = positive(this_bm[self.sim_edep_kev.format(ch)])
this_sim_bg = positive(self.bm_obs_bg[self.exp_area_pvs.format(ch)])
self.sim_e[src][ch] = this_sim_e
self.sim_bg[ch] = this_sim_bg
t1=time.time()
this_transformer = transformer_mixer(
this_sim_e,
self.en_edges[src][ch],
this_sim_bg,
self.ar_edges[src][ch],
)
t2=time.time()
self.vp(2,"setting up transformer {},{} took {:.3f} ms".format(src,ch,(t2-t1)*1000))
self.transformers[src][ch] = this_transformer
self.vp(1,"")
def evaluate_fit_dev(self, ch):
    """Developing evaluation routine using utils.fit.

    Builds a parametrized, saturation-transformed spectral model for
    channel ``ch``, fits it to the observed area spectra of all fit
    sources simultaneously, prints the optimized parameters and
    goodness-of-fit, and plots data vs. model.
    """
    par = fit.parametrizer()
    # p is the parameter_holder object generated by par
    # what are mu and sigma, in area, as functions of E?
    # this area is untransformed; that is, it's what it would
    # be without any saturation effects.
    def mu_sigma(E, p):
        mu = p.gamma*E
        return mu, mu*np.sqrt(p.rho_p/E + (p.res_s_a*E+p.res_s_b)**2)
    # initial guesses for gain (gamma) and resolution parameters
    par.add_parameters({
        "gamma" : 1150.0 ,
        "rho_p" : 0.1000,
        "res_s_a": 0.0090,
        "res_s_b": 0.0000,
    })
    # # Inverse of the transformation which is applied by saturation.
    # # ixf takes observed area and gives what the area would have been
    # # for the same signal, if saturation effects did not exist.
    # # A_unsat = ixf(A_obs, *p)
    # exponent = 1
    # def ixf_and_deriv(A_obs, p):
    #     q1 = (p.ixf_a*1000/A_obs) ** exponent
    #     exp_mq1 = np.exp(-q1)
    #     return A_obs*(1+p.ixf_b*exp_mq1), 1+(1-exponent*q1)*p.ixf_b*exp_mq1
    # par.add_parameters({
    #     "ixf_a": 0.2,
    #     "ixf_b":120.0,
    # })
    # quadratic inverse-saturation transform and its derivative dxf/dA
    def ixf_and_deriv(A_obs, p):
        xf = A_obs * (1 + p.ixf_a * (A_obs / 100000) + p.ixf_b * (A_obs / 100000)**2 )
        dxf = (1 + 2 * p.ixf_a * (A_obs / 100000) + 3 * p.ixf_b * (A_obs / 100000)**2 )
        return xf, dxf
    par.add_parameters({
        "ixf_a": 0.2,
        "ixf_b": 0.0,
    })
    projectors = {}
    for src in self.fit_sources:
        # returns a function f(x, yprime, f_params, xf_params)
        # here x is Energy, yprime is observed area
        # f_params are the parameters of the E->mu,sigm function
        # xf_params are the parameters of ixf
        #
        # this function calculates the contribution to the observed
        # are spectrum at Aprime from one event of energy E
        this_func = fit.transformed_gaus_spread(
            mu_sigma,
            ixf_and_deriv,
        )
        # # no transformation >:(
        # this_func = fit.gaus_spread(mu_sigma)
        # this sets up calculation for optimization
        # calculations are done on 2d arrays with axes (energy, observed_area)
        # and then projected onto the area axis to get an area spectrum.
        #
        # the resulting object can be called with just p
        this_projector = fit.binned_projector(
            func = this_func,
            xMids = self.en_mids[src][ch],
            yMids = self.ar_mids[src][ch],
            xEdges = self.en_edges[src][ch],
            xData = self.sim_e[src][ch],
        )
        projectors[src] = this_projector
        # per-source normalization parameters: source and background counts
        par.add_parameters({
            "n_src_{}_1".format(src): self.n_src_guess[src][ch],
            "n_bg_{}".format(src) : self.n_bg_guess[src][ch],
        })
    def model_spectrum(src, p):
        # transform source spectrum pieces
        # mix source pieces and background
        # temporarily just one piece per source
        src_norm = projectors[src](p)
        bg_norm = self.transformers[src][ch].bg_counts
        # normalize to unit sum
        src_norm = src_norm / src_norm.sum()
        bg_norm = bg_norm / bg_norm .sum()
        spec = bg_norm * getattr(p, "n_bg_{}".format(src))
        spec += src_norm * getattr(p, "n_src_{}_1".format(src))
        return spec
    def model_spectra(xdata, p):
        # NOTE(review): this iterates self.sim_sources, while xdata/ydata
        # below are concatenated over self.fit_sources -- confirm the two
        # always match, otherwise model and data are misaligned.
        spectra = np.concatenate([model_spectrum(_,p) for _ in self.sim_sources], axis=0)
        return spectra
    # xdata = self.fit_xflat[ch]
    # concatenate all sources' bin centers / counts into flat fit arrays
    xdata = np.concatenate([self.ar_mids[_][ch] for _ in self.fit_sources],axis=0)
    ydata = np.concatenate([self.counts[ _][ch] for _ in self.fit_sources],axis=0)
    p0 = par.get_p0()
    # weighted least squares with Poisson (sqrt(N)) bin errors
    popt = par.curve_fit(
        xdata = xdata,
        ydata = ydata,
        yerr = np.sqrt(ydata),
        f = model_spectra,
    )
    # small helper to format a row of values for printing
    pft = lambda fmt,ents,sep=' ':sep.join([fmt.format(_) for _ in ents])
    fmt_par = '{:>14.6f}'
    fmt_name = '{:>14}'
    print("\npopt")
    print(popt)
    print(popt.f_names)
    print(pft(fmt_name, popt.v_names ))
    print(pft(fmt_par , p0.v_values ))
    print(pft(fmt_par , popt.v_values))
    print(pft(fmt_par , popt.v_err ))
    print("\ngoodness of fit")
    print("chi2 / ndof = {:.1f} / {} = {:.4f}".format(popt.chi2, popt.ndof, popt.rchi2))
    # print("\ncalculating error on modeled counts")
    ym_opt = model_spectra(xdata, popt)
    ym_0 = model_spectra(xdata, p0)
    # ym_err = np.array([par.scalar_num_error_p_only(popt, model_spectra) for _ in xdata])
    # ym_pull = (ytrue - ym_opt) / ym_err
    # ychi2 = (ym_pull[ftr_nz] ** 2).sum()
    # yndof = ftr_nz.sum()
    # print("modeled counts vs. truth")
    # print("chi2 / ndof = {:.1f} / {} = {:.4f}".format(ychi2, yndof, ychi2/yndof))
    print('\nplotting results')
    # plot against a unitless 0..1 axis since bins are concatenated
    xline = np.linspace(0,1,xdata.size)
    plt.step(xline, ydata, 'k', where='mid', label='data')
    plt.fill_between(xline, ydata, 0, color='k', alpha=0.1, step='mid')
    plt.plot(xline, ym_0 , 'b-', label='model with p0')
    plt.plot(xline, ym_opt, 'g-', label='optimal model')
    # plt.fill_between(xtest, ym_opt-ym_err, ym_opt+ym_err, step=None, color='g', alpha=0.25)
    plt.legend()
    plt.show()
def plot_basic_check(self, sources=None, savefig="", guess_c=380, guess_d=1.5):
    """Quick visual sanity check of loaded data against the transformers.

    For each source, plots the observed area spectrum per channel alongside
    the transformed (simulated) spectrum produced with guess parameters.

    Parameters
    ----------
    sources : iterable or None
        Source identifiers to plot; defaults to every simulated source.
    savefig : str
        When non-empty, a format string with a ``{src}`` field used as the
        path to save each figure.
    guess_c, guess_d : float
        Guess amplitudes for the simulated and background components
        (previously hard-coded inside the method).
    """
    # Bug fix: `show_sources` used to be bound only in the `sources is None`
    # branch, so passing an explicit source list raised NameError.
    if sources is None:
        show_sources = self.bms_sim_src.keys()
    else:
        show_sources = sources
    for src in show_sources:
        # Look up the full area range (also validates the source exists).
        this_lo, this_hi = self.src_area_range_full[src]
        plt.subplots(nrows=1, ncols=len(self.ch_comp), sharex=True, sharey=True)
        for ich, ch in enumerate(self.ch_comp):
            this_transformer = self.transformers[src][ch]
            # None -> let the transformer fall back to its own binning/counts.
            this_counts = None
            this_ar_mids = None
            t1 = time.time()
            this_xf_result = this_transformer(
                this_ar_mids,
                guess_c,
                guess_d,
                self.gamma[ch],
                self.res_s[ch],
                self.rho_p[ch],
                en_counts=this_counts
            )
            t2 = time.time()
            self.vp(2, "calling transformer {},{} took {:.3f} ms".format(src, ch, (t2-t1)*1000))
            plt.subplot(1, len(self.ch_comp), ich+1)
            plt.hist(
                self.bms_obs_src[src][self.exp_area_pvs.format(ch)],
                this_transformer.ar_mids,
                label="observed",
                histtype='step',
            )
            plt.step(this_transformer.ar_mids, this_xf_result, where='mid', label="simulated")
            plt.yscale('log')
            plt.xlabel('area (pVs)')
            plt.ylabel('counts')
            plt.title("Channel {}".format(ch))
            plt.legend()
        self.vp(2, "")
        # Clamp the y axis at 1 so the log scale doesn't show empty bins.
        plt.ylim(1, plt.ylim()[1])
        plt.suptitle(src)
        if savefig:
            plt.savefig(savefig.format(src=src.lower()))
        plt.show()
def evaluate(self, xdata, *parameters, incl_src=True, incl_bg=True):
    """Build the modeled area spectrum for one channel.

    ``parameters`` is laid out as ``c0, d0, c1, d1, ..., gamma, *res_s,
    rho_p, ps0, ps1`` where the (c, d) amplitude pairs follow the sorted
    order of source identifiers. ``xdata`` is a dict of src -> area bin
    centers carrying the channel id as ``xdata.id``. Returns the
    concatenated per-source spectra as one flat array.
    """
    # Split off the per-source (c, d) amplitude pairs from the shared
    # channel parameters.
    n_cd = 2 * len(self.fit_sources)
    cd = parameters[:n_cd]
    gamma, *res_s, rho_p, ps0, ps1 = parameters[n_cd:]
    sources = sorted(xdata.keys())
    amp_sim = {}
    amp_bg = {}
    for index, source in enumerate(sources):
        amp_sim[source] = cd[2 * index]
        amp_bg[source] = cd[2 * index + 1]
    # Pre-allocate the output and fill it piecewise, one source at a time.
    total_size = sum(arr.size for arr in xdata.values())
    spec = np.zeros(total_size)
    ch = xdata.id
    offset = 0
    for source in sources:
        piece = self.transformers[source][ch](
            xdata[source],
            amp_sim[source],
            amp_bg[source],
            gamma,
            *res_s,
            rho_p,
            ps0, ps1,
            incl_src=incl_src,
            incl_bg=incl_bg,
        )
        spec[offset:offset + piece.size] = piece
        offset += piece.size
    return spec
def fit(self, channels=None, sources=None, plot_test_fit=False, plot_best_fit=False):
"""fit model to all sources' spectra at once"""
if channels is None:
channels = self.ch_comp
if sources is None:
sources = self.all_sources
# this is important!
# pieces of the routine rely on the list of source
# identifiers being sorted.
self.fit_sources = sorted(sources)
self.fit_channels = sorted(channels)
del sources
del channels
# keep the results for later
# todo: implement fit_result class
# have one dict of ch:fit_result
# assign this_result and use that in loop
self.counts = {_:{} for _ in self.fit_sources}
self.fit_npars = {}
self.fit_xdata = {}
self.fit_ydata = {}
self.fit_yerr = {}
self.fit_ydata_pieces = {}
self.fit_ydata_pos = {}
self.fit_popt = {}
self.fit_perr = {}
self.fit_pcov = {}
self.fit_yopt = {}
self.fit_yopt_src = {}
self.fit_yopt_bg = {}
self.fit_yerr_sim = {}
self.fit_yerr_tot = {}
self.fit_resid = {}
self.fit_pulls = {}
self.fit_chi2 = {}
self.fit_ndof = {}
self.fit_rchi2 = {}
self.fit_xflat = {}
self.fit_xline = {}
class idict(dict):
"""dict with extra attribute"""
def __init__(self, id_, *args, **kwargs):
super(idict, self).__init__(*args, **kwargs)
self.id = id_
def flat(self):
return np.concatenate([self[_] for _ in sorted(self.keys())], axis=0)
# do the fit routine one channel at a time
# since no parameters are shared between channels
for ch in self.fit_channels:
self.vp(1,"")
self.vp(1,"performing fit routine on channel {}".format(ch))
# xdata is a dict of src:ar_mids
# which additionally has a channel identifier
# which lets evaluate determine which channel to use
self.fit_xdata[ch] = idict(ch)
# ydata is an array with shape (n)
# array counts in area histograms
# it will be concatenated from each source in this channel
self.fit_ydata_pieces[ch] = []
# get the pieces, per source
for src in self.fit_sources:
self.fit_xdata[ch][src] = self.ar_mids[src][ch]
this_counts, _ = np.histogram(
self.bms_obs_src[src][self.exp_area_pvs.format(ch)],
self.ar_edges[src][ch],
)
self.fit_ydata_pieces[ch].append(this_counts)
self.counts[src][ch] = this_counts
self.fit_ydata[ch] = np.concatenate(self.fit_ydata_pieces[ch], axis=0)
# compose initial parameter guesses
# if needed: better way for gamma, res_s, rho_p
# if needed: nonzero guess for background contribution
n_res_s_pars = 2
res_s_basic = False
p0_cd = []
if res_s_basic:
p0_rest = [self.gamma[ch], self.res_s[ch], self.rho_p[ch]] + squish_p0
else:
p0_rest = [self.gamma[ch], *([0.0]*n_res_s_pars), self.rho_p[ch]] + squish_p0
unit_evaluation = self.evaluate(self.fit_xdata[ch], *([1,0]*len(self.fit_sources) + p0_rest))
istart = 0
for isrc,src in enumerate(self.fit_sources):
sum_ydata = self.fit_ydata_pieces[ch][isrc].sum()
sum_unit = unit_evaluation[istart:istart+self.fit_xdata[ch][src].size].sum()
istart += self.fit_xdata[ch][src].size
p0_cd += [sum_ydata / sum_unit, 0.0]
p0 = p0_cd + p0_rest
self.fit_npars[ch] = len(p0)
# test the evaluation (fit) function with guess parameters
# todo: this uses zero background contribution, so it isn't
# useful for troubleshooting background contributions
self.fit_xflat[ch] = self.fit_xdata[ch].flat()
self.fit_xline[ch] = np.linspace(0,1,self.fit_xflat[ch].size)
if plot_test_fit:
y0 = self.evaluate(self.fit_xdata[ch],*p0)
plt.plot(self.fit_xline[ch], self.fit_ydata[ch], label='data')
plt.plot(self.fit_xline[ch], y0 , label='model, guess params')
plt.yscale('log')
plt.xlabel('bins (concanetated, unscaled)')
plt.ylabel('counts')
plt.legend()
plt.show()
# perform optimization and evaluate results
self.vp(1, "performing curve_fit")
self.fit_ydata_pos[ch] = (self.fit_ydata[ch] > 0)
self.fit_yerr[ch] = np.sqrt(self.fit_ydata[ch])
# todo: better way of handling zero-count bins
self.fit_yerr[ch][self.fit_yerr[ch] <= 0] = 1.0
self.fit_popt[ch], self.fit_pcov[ch] = opt.curve_fit(
self.evaluate,
self.fit_xdata[ch],
self.fit_ydata[ch],
p0=p0,
sigma=self.fit_yerr[ch],
absolute_sigma=True,
)
self.fit_perr[ch] = np.sqrt(np.diag(self.fit_pcov[ch]))
self.fit_yopt[ch] = self.evaluate(self.fit_xdata[ch],*self.fit_popt[ch])
self.fit_yopt_src[ch] = self.evaluate(self.fit_xdata[ch],*self.fit_popt[ch], incl_bg=False)
self.fit_yopt_bg[ch] = self.evaluate(self.fit_xdata[ch],*self.fit_popt[ch], incl_src=False)
self.fit_resid[ch] = self.fit_ydata[ch] - self.fit_yopt[ch]
# self.fit_yerr_sim[ch] = np.sqrt(self.fit_yopt[ch])
# todo: full error calculation
# currently just include error contributions from c, d
# as well as the poisson error on background bin counts
# since these are the easiest components
pieces_err_c = []
pieces_err_d = []
pieces_err_bg = []
jstart = 0
for isrc,src in enumerate(self.fit_sources):
this_c = self.fit_popt[ch][isrc*2 ]
this_d = self.fit_popt[ch][isrc*2+1]
this_c_err = self.fit_perr[ch][isrc*2 ]
this_d_err = self.fit_perr[ch][isrc*2+1]
this_bg_counts = self.transformers[src][ch].bg_counts
this_size = this_bg_counts.size
pieces_err_bg.append(np.sqrt(this_bg_counts) * this_d)
pieces_err_d.append(this_d_err * self.fit_yopt_bg[ ch][jstart:jstart+this_size] / this_d)
pieces_err_c.append(this_c_err * self.fit_yopt_src[ch][jstart:jstart+this_size] / this_c)
jstart += this_size
err_c = np.concatenate(pieces_err_c , axis=0)
err_d = np.concatenate(pieces_err_d , axis=0)
err_bg = np.concatenate(pieces_err_bg, axis=0)
self.fit_yerr_sim[ch] = np.sqrt(err_c**2 + err_d**2 + err_bg**2)
self.fit_yerr_tot[ch] = np.sqrt(self.fit_yerr[ch]**2 + self.fit_yerr_sim[ch]**2)
self.fit_pulls[ch] = self.fit_resid[ch] / self.fit_yerr_tot[ch]
self.fit_chi2[ch] = (self.fit_pulls[ch][self.fit_ydata_pos[ch]] ** 2).sum()
self.fit_ndof[ch] = self.fit_ydata_pos[ch].sum() - self.fit_npars[ch]
self.fit_rchi2[ch] = self.fit_chi2[ch] / self.fit_ndof[ch]
# print results
pfe = "{:>12.4e}"
pff = "{:>12.4f}"
self.vp(1,"popt (top), perr (bottom)")
self.vp(1," ".join([pff.format(_) for _ in self.fit_popt[ch]]))
self.vp(1," ".join([pff.format(_) for _ in self.fit_perr[ch]]))
self.vp(1,"")
self.vp(2,"pcov")
self.vp(2,"\n".join([" ".join([pfe.format(__) for | |
from django.conf import settings
from django.contrib import messages
from django.core.mail import send_mail
from django.db import transaction
from django.db.models import Count
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.template.defaultfilters import pluralize
from django.utils.text import slugify
from django.views.generic import View
from net.models import Connection
from peeringdb.filters import NetworkIXLanFilterSet
from peeringdb.forms import NetworkIXLanFilterForm
from peeringdb.models import NetworkIXLan
from peeringdb.tables import NetworkContactTable, NetworkIXLanTable
from utils.forms import ConfirmationForm
from utils.views import (
AddOrEditView,
BulkAddFromDependencyView,
BulkDeleteView,
BulkEditView,
DeleteView,
DetailsView,
ModelListView,
PermissionRequiredMixin,
ReturnURLMixin,
)
from .filters import (
AutonomousSystemFilterSet,
BGPGroupFilterSet,
CommunityFilterSet,
ConfigurationFilterSet,
DirectPeeringSessionFilterSet,
EmailFilterSet,
InternetExchangeFilterSet,
InternetExchangePeeringSessionFilterSet,
RouterFilterSet,
RoutingPolicyFilterSet,
)
from .forms import (
AutonomousSystemEmailForm,
AutonomousSystemFilterForm,
AutonomousSystemForm,
BGPGroupBulkEditForm,
BGPGroupFilterForm,
BGPGroupForm,
CommunityBulkEditForm,
CommunityFilterForm,
CommunityForm,
ConfigurationFilterForm,
ConfigurationForm,
DirectPeeringSessionBulkEditForm,
DirectPeeringSessionFilterForm,
DirectPeeringSessionForm,
EmailFilterForm,
EmailForm,
InternetExchangeBulkEditForm,
InternetExchangeFilterForm,
InternetExchangeForm,
InternetExchangePeeringDBForm,
InternetExchangePeeringSessionBulkEditForm,
InternetExchangePeeringSessionFilterForm,
InternetExchangePeeringSessionForm,
RouterBulkEditForm,
RouterFilterForm,
RouterForm,
RoutingPolicyBulkEditForm,
RoutingPolicyFilterForm,
RoutingPolicyForm,
)
from .models import (
AutonomousSystem,
BGPGroup,
BGPSession,
Community,
Configuration,
DirectPeeringSession,
Email,
InternetExchange,
InternetExchangePeeringSession,
Router,
RoutingPolicy,
)
from .tables import (
AutonomousSystemTable,
BGPGroupTable,
CommunityTable,
ConfigurationTable,
DirectPeeringSessionTable,
EmailTable,
InternetExchangeConnectionTable,
InternetExchangePeeringSessionTable,
InternetExchangeTable,
RouterConnectionTable,
RouterTable,
RoutingPolicyTable,
)
class ASList(PermissionRequiredMixin, ModelListView):
    """Paginated, filterable list of autonomous systems."""

    permission_required = "peering.view_autonomoussystem"
    # Annotate each AS with its direct and IXP session counts so the table
    # can display them without per-row queries.
    queryset = AutonomousSystem.objects.annotate(
        directpeeringsession_count=Count("directpeeringsession", distinct=True),
        internetexchangepeeringsession_count=Count(
            "internetexchangepeeringsession", distinct=True
        ),
    ).order_by("affiliated", "asn")
    filter = AutonomousSystemFilterSet
    filter_form = AutonomousSystemFilterForm
    table = AutonomousSystemTable
    template = "peering/autonomoussystem/list.html"
class ASAdd(PermissionRequiredMixin, AddOrEditView):
    """Create a new autonomous system."""

    permission_required = "peering.add_autonomoussystem"
    model = AutonomousSystem
    form = AutonomousSystemForm
    return_url = "peering:autonomoussystem_list"
    template = "peering/autonomoussystem/add_edit.html"
class ASDetails(DetailsView):
    """Detail page for a single autonomous system."""

    permission_required = "peering.view_autonomoussystem"
    queryset = AutonomousSystem.objects.all()

    def get_context(self, request, **kwargs):
        """Build the template context, including the IXPs shared with the
        user's affiliated AS and the peering sessions still missing on each.
        """
        instance = get_object_or_404(self.queryset, **kwargs)
        try:
            affiliated = AutonomousSystem.objects.get(
                pk=request.user.preferences.get("context.as")
            )
        except AutonomousSystem.DoesNotExist:
            affiliated = None
        shared_internet_exchanges = {
            ixp: instance.get_missing_peering_sessions(affiliated, ixp)
            for ixp in instance.get_shared_internet_exchange_points(affiliated)
        }
        return {
            "instance": instance,
            "shared_internet_exchanges": shared_internet_exchanges,
            "active_tab": "main",
        }
class ASEdit(PermissionRequiredMixin, AddOrEditView):
    """Edit an existing autonomous system."""

    permission_required = "peering.change_autonomoussystem"
    model = AutonomousSystem
    form = AutonomousSystemForm
    template = "peering/autonomoussystem/add_edit.html"
class ASEmail(PermissionRequiredMixin, View):
    """Compose and send an e-mail to an autonomous system's contacts."""

    permission_required = "peering.send_email"

    def get(self, request, *args, **kwargs):
        """Render the e-mail composition form for the AS."""
        instance = get_object_or_404(AutonomousSystem, asn=kwargs["asn"])
        if not instance.can_receive_email:
            return redirect(instance.get_absolute_url())
        form = AutonomousSystemEmailForm()
        form.fields["recipient"].choices = instance.get_contact_email_addresses()
        return render(
            request,
            "peering/autonomoussystem/email.html",
            {"instance": instance, "form": form, "active_tab": "email"},
        )

    def post(self, request, *args, **kwargs):
        """Validate the submitted form and send the e-mail."""
        instance = get_object_or_404(AutonomousSystem, asn=kwargs["asn"])
        if not instance.can_receive_email:
            # Bug fix: the redirect was previously built but not returned,
            # so the view fell through and sent the e-mail anyway.
            return redirect(instance.get_absolute_url())
        form = AutonomousSystemEmailForm(request.POST)
        form.fields["recipient"].choices = instance.get_contact_email_addresses()
        if form.is_valid():
            sent = send_mail(
                form.cleaned_data["subject"],
                form.cleaned_data["body"],
                settings.SERVER_EMAIL,
                [form.cleaned_data["recipient"]],
            )
            # send_mail returns the number of messages delivered.
            if sent == 1:
                messages.success(request, "Email sent.")
            else:
                messages.error(request, "Unable to send the email.")
        return redirect(instance.get_absolute_url())
class ASDelete(PermissionRequiredMixin, DeleteView):
    """Delete a single autonomous system."""

    permission_required = "peering.delete_autonomoussystem"
    model = AutonomousSystem
    return_url = "peering:autonomoussystem_list"
class ASBulkDelete(PermissionRequiredMixin, BulkDeleteView):
    """Delete multiple autonomous systems at once."""

    permission_required = "peering.delete_autonomoussystem"
    model = AutonomousSystem
    filter = AutonomousSystemFilterSet
    table = AutonomousSystemTable
class AutonomousSystemContacts(PermissionRequiredMixin, ModelListView):
    """List the PeeringDB contacts attached to an autonomous system."""

    permission_required = "peering.view_autonomoussystem"
    table = NetworkContactTable
    template = "peering/autonomoussystem/contacts.html"

    def build_queryset(self, request, kwargs):
        """Return the AS's PeeringDB contacts, or None when no ASN given."""
        if "asn" not in kwargs:
            return None
        autonomous_system = get_object_or_404(AutonomousSystem, asn=kwargs["asn"])
        return autonomous_system.peeringdb_contacts

    def extra_context(self, kwargs):
        """Expose the AS instance and mark the contacts tab as active."""
        context = {"active_tab": "contacts"}
        if "asn" in kwargs:
            context["instance"] = get_object_or_404(
                AutonomousSystem, asn=kwargs["asn"]
            )
        return context
class AutonomousSystemDirectPeeringSessions(PermissionRequiredMixin, ModelListView):
    """List an autonomous system's direct peering sessions."""

    permission_required = "peering.view_autonomoussystem"
    filter = DirectPeeringSessionFilterSet
    filter_form = DirectPeeringSessionFilterForm
    table = DirectPeeringSessionTable
    template = "peering/autonomoussystem/direct_peering_sessions.html"

    def build_queryset(self, request, kwargs):
        """Return the AS's direct sessions, ordered for display."""
        autonomous_system = get_object_or_404(AutonomousSystem, asn=kwargs["asn"])
        sessions = autonomous_system.get_direct_peering_sessions()
        sessions = sessions.prefetch_related("bgp_group")
        return sessions.order_by("bgp_group", "relationship", "ip_address")

    def extra_context(self, kwargs):
        """Expose the AS instance and mark the direct sessions tab active."""
        return {
            "instance": get_object_or_404(AutonomousSystem, asn=kwargs["asn"]),
            "active_tab": "directsessions",
        }
class AutonomousSystemInternetExchangesPeeringSessions(
    PermissionRequiredMixin, ModelListView
):
    """List an autonomous system's IXP peering sessions."""

    permission_required = "peering.view_autonomoussystem"
    filter = InternetExchangePeeringSessionFilterSet
    filter_form = InternetExchangePeeringSessionFilterForm
    table = InternetExchangePeeringSessionTable
    template = "peering/autonomoussystem/internet_exchange_peering_sessions.html"
    # The list is already scoped to one AS; hide the redundant filter.
    hidden_filters = ["autonomous_system__id"]

    def build_queryset(self, request, kwargs):
        """Return the AS's IXP sessions, ordered for display.

        Fix: removed an unreachable ``return queryset`` statement that
        followed the real return and referenced an undefined name.
        """
        instance = get_object_or_404(AutonomousSystem, asn=kwargs["asn"])
        return (
            instance.get_ixp_peering_sessions()
            .prefetch_related("ixp_connection")
            .order_by("ixp_connection", "ip_address")
        )

    def extra_context(self, kwargs):
        """Expose the AS instance and mark the IXP sessions tab active."""
        return {
            "instance": get_object_or_404(AutonomousSystem, asn=kwargs["asn"]),
            "active_tab": "ixsessions",
        }
class AutonomousSystemPeers(PermissionRequiredMixin, ModelListView):
    """List potential peers (PeeringDB NetworkIXLan records) for an AS."""

    permission_required = "peering.view_autonomoussystem"
    table = NetworkIXLanTable
    template = "peering/autonomoussystem/peers.html"

    def build_queryset(self, request, kwargs):
        """Return sessions missing with the affiliated AS, or an empty set."""
        try:
            affiliated = AutonomousSystem.objects.get(
                pk=request.user.preferences.get("context.as")
            )
        except AutonomousSystem.DoesNotExist:
            affiliated = None
        if affiliated and "asn" in kwargs:
            instance = get_object_or_404(AutonomousSystem, asn=kwargs["asn"])
            return instance.get_missing_peering_sessions(affiliated)
        return NetworkIXLan.objects.none()

    def extra_context(self, kwargs):
        """Expose the AS instance and mark the peers tab as active."""
        context = {"active_tab": "peers"}
        if "asn" in kwargs:
            context["instance"] = get_object_or_404(
                AutonomousSystem, asn=kwargs["asn"]
            )
        return context
class AutonomousSystemAddFromPeeringDB(
    PermissionRequiredMixin, BulkAddFromDependencyView
):
    """Bulk-create IXP peering sessions from PeeringDB records."""

    permission_required = "peering.add_internetexchangepeeringsession"
    model = InternetExchangePeeringSession
    dependency_model = NetworkIXLan
    form_model = InternetExchangePeeringSessionForm
    template = "peering/internetexchangepeeringsession/add_from_peeringdb.html"

    def process_dependency_object(self, request, dependency):
        """Create sessions for the user's affiliated AS.

        Without an affiliated AS there is nothing to create, so an empty
        list is returned.
        """
        try:
            affiliated = AutonomousSystem.objects.get(
                pk=request.user.preferences.get("context.as")
            )
        except AutonomousSystem.DoesNotExist:
            return []
        return InternetExchangePeeringSession.create_from_peeringdb(
            affiliated, dependency
        )

    def sort_objects(self, object_list):
        """Flatten session couples into dicts of their display fields,
        skipping empty entries.
        """
        return [
            {
                "autonomous_system": session.autonomous_system,
                "ixp_connection": session.ixp_connection,
                "ip_address": session.ip_address,
            }
            for couple in object_list
            for session in couple
            if session
        ]
class BGPGroupList(PermissionRequiredMixin, ModelListView):
    """Paginated, filterable list of BGP groups."""

    permission_required = "peering.view_bgpgroup"
    # Annotate each group with its session count for display in the table.
    queryset = BGPGroup.objects.annotate(
        directpeeringsession_count=Count("directpeeringsession")
    ).order_by("name", "slug")
    filter = BGPGroupFilterSet
    filter_form = BGPGroupFilterForm
    table = BGPGroupTable
    template = "peering/bgpgroup/list.html"
class BGPGroupDetails(DetailsView):
    """Detail page for a single BGP group."""

    permission_required = "peering.view_bgpgroup"
    queryset = BGPGroup.objects.all()

    def get_context(self, request, **kwargs):
        """Build the template context for the group's main tab."""
        bgp_group = get_object_or_404(self.queryset, **kwargs)
        return {"instance": bgp_group, "active_tab": "main"}
class BGPGroupAdd(PermissionRequiredMixin, AddOrEditView):
    """Create a new BGP group."""

    permission_required = "peering.add_bgpgroup"
    model = BGPGroup
    form = BGPGroupForm
    return_url = "peering:bgpgroup_list"
    template = "peering/bgpgroup/add_edit.html"
class BGPGroupEdit(PermissionRequiredMixin, AddOrEditView):
    """Edit an existing BGP group."""

    permission_required = "peering.change_bgpgroup"
    model = BGPGroup
    form = BGPGroupForm
    template = "peering/bgpgroup/add_edit.html"
class BGPGroupBulkEdit(PermissionRequiredMixin, BulkEditView):
    """Edit multiple BGP groups at once."""

    permission_required = "peering.change_bgpgroup"
    queryset = BGPGroup.objects.all()
    filter = BGPGroupFilterSet
    table = BGPGroupTable
    form = BGPGroupBulkEditForm
class BGPGroupDelete(PermissionRequiredMixin, DeleteView):
    """Delete a single BGP group."""

    permission_required = "peering.delete_bgpgroup"
    model = BGPGroup
    return_url = "peering:bgpgroup_list"
class BGPGroupBulkDelete(PermissionRequiredMixin, BulkDeleteView):
    """Delete multiple BGP groups at once."""

    permission_required = "peering.delete_bgpgroup"
    model = BGPGroup
    filter = BGPGroupFilterSet
    table = BGPGroupTable
class BGPGroupPeeringSessions(PermissionRequiredMixin, ModelListView):
    """List the direct peering sessions belonging to a BGP group."""

    permission_required = "peering.view_bgpgroup"
    filter = DirectPeeringSessionFilterSet
    filter_form = DirectPeeringSessionFilterForm
    table = DirectPeeringSessionTable
    template = "peering/bgpgroup/sessions.html"
    # The list is already scoped to one group; hide the redundant filter.
    hidden_filters = ["bgp_group"]

    def build_queryset(self, request, kwargs):
        """Return the group's sessions, or None when no slug was given."""
        if "slug" not in kwargs:
            return None
        group = get_object_or_404(BGPGroup, slug=kwargs["slug"])
        return group.directpeeringsession_set.prefetch_related(
            "autonomous_system", "router"
        ).order_by("autonomous_system", "ip_address")

    def extra_context(self, kwargs):
        """Expose the group instance and mark the sessions tab as active."""
        context = {"active_tab": "directsessions"}
        if "slug" in kwargs:
            context["instance"] = get_object_or_404(BGPGroup, slug=kwargs["slug"])
        return context
class CommunityList(PermissionRequiredMixin, ModelListView):
    """Paginated, filterable list of BGP communities."""

    permission_required = "peering.view_community"
    queryset = Community.objects.all()
    filter = CommunityFilterSet
    filter_form = CommunityFilterForm
    table = CommunityTable
    template = "peering/community/list.html"
class CommunityAdd(PermissionRequiredMixin, AddOrEditView):
    """Create a new BGP community."""

    permission_required = "peering.add_community"
    model = Community
    form = CommunityForm
    return_url = "peering:community_list"
    template = "peering/community/add_edit.html"
class CommunityDetails(DetailsView):
    """Detail page for a single BGP community."""

    permission_required = "peering.view_community"
    queryset = Community.objects.all()

    def get_context(self, request, **kwargs):
        """Build the template context for the community's main tab."""
        community = get_object_or_404(self.queryset, **kwargs)
        return {"instance": community, "active_tab": "main"}
class CommunityEdit(PermissionRequiredMixin, AddOrEditView):
    """Edit an existing BGP community."""

    permission_required = "peering.change_community"
    model = Community
    form = CommunityForm
    template = "peering/community/add_edit.html"
class CommunityDelete(PermissionRequiredMixin, DeleteView):
    """Delete a single BGP community."""

    permission_required = "peering.delete_community"
    model = Community
    return_url = "peering:community_list"
class CommunityBulkDelete(PermissionRequiredMixin, BulkDeleteView):
    """Delete multiple BGP communities at once."""

    permission_required = "peering.delete_community"
    model = Community
    filter = CommunityFilterSet
    table = CommunityTable
class CommunityBulkEdit(PermissionRequiredMixin, BulkEditView):
    """Edit multiple BGP communities at once."""

    permission_required = "peering.change_community"
    queryset = Community.objects.all()
    filter = CommunityFilterSet
    table = CommunityTable
    form = CommunityBulkEditForm
class ConfigurationList(PermissionRequiredMixin, ModelListView):
    """Paginated, filterable list of configuration templates."""

    permission_required = "peering.view_configuration"
    queryset = Configuration.objects.all()
    filter = ConfigurationFilterSet
    filter_form = ConfigurationFilterForm
    table = ConfigurationTable
    template = "peering/configuration/list.html"
class ConfigurationAdd(PermissionRequiredMixin, AddOrEditView):
    """Create a new Configuration; redirects back to the configuration list."""

    permission_required = "peering.add_configuration"
    model = Configuration
    form = ConfigurationForm
    template = "peering/configuration/add_edit.html"
    return_url = "peering:configuration_list"
class ConfigurationDetails(DetailsView):
    """Display a single Configuration together with the routers that use it."""

    permission_required = "peering.view_configuration"
    queryset = Configuration.objects.all()

    def get_context(self, request, **kwargs):
        """Resolve the Configuration and list routers referencing it."""
        configuration = get_object_or_404(self.queryset, **kwargs)
        context = {
            "instance": configuration,
            "routers": Router.objects.filter(configuration_template=configuration),
            "active_tab": "main",
        }
        return context
class ConfigurationEdit(PermissionRequiredMixin, AddOrEditView):
    """Edit an existing Configuration."""

    permission_required = "peering.change_configuration"
    model = Configuration
    form = ConfigurationForm
    template = "peering/configuration/add_edit.html"
class ConfigurationDelete(PermissionRequiredMixin, DeleteView):
    """Delete a single Configuration; redirects back to the configuration list."""

    permission_required = "peering.delete_configuration"
    model = Configuration
    return_url = "peering:configuration_list"
class ConfigurationBulkDelete(PermissionRequiredMixin, BulkDeleteView):
    """Bulk-delete Configuration objects selected from the filtered table."""

    permission_required = "peering.delete_configuration"
    model = Configuration
    filter = ConfigurationFilterSet
    table = ConfigurationTable
class DirectPeeringSessionAdd(PermissionRequiredMixin, AddOrEditView):
    """Create a new DirectPeeringSession."""

    permission_required = "peering.add_directpeeringsession"
    model = DirectPeeringSession
    form = DirectPeeringSessionForm
    template = "peering/directpeeringsession/add_edit.html"
class DirectPeeringSessionBulkDelete(PermissionRequiredMixin, BulkDeleteView):
    """Bulk-delete direct peering sessions, optionally scoped by context."""

    permission_required = "peering.delete_directpeeringsession"
    model = DirectPeeringSession
    filter = DirectPeeringSessionFilterSet
    table = DirectPeeringSessionTable

    def filter_by_extra_context(self, queryset, request, kwargs):
        """Narrow the queryset when the request comes from a scoped page.

        An ``asn`` in the POST data restricts deletion to sessions of that
        autonomous system; a ``router_id`` restricts it to sessions on that
        router. Otherwise the queryset is returned untouched.
        """
        # AutonomousSystem context: keep only sessions with the posted ASN.
        if "asn" in request.POST:
            remote_as = get_object_or_404(
                AutonomousSystem, asn=request.POST.get("asn")
            )
            return queryset.filter(autonomous_system=remote_as)
        # Router context: keep only sessions attached to the posted router.
        if "router_id" in request.POST:
            router = get_object_or_404(
                Router, pk=int(request.POST.get("router_id"))
            )
            return queryset.filter(router=router)
        return queryset
class DirectPeeringSessionBulkEdit(PermissionRequiredMixin, BulkEditView):
    """Bulk-edit direct peering sessions via DirectPeeringSessionBulkEditForm."""

    permission_required = "peering.change_directpeeringsession"
    # select_related avoids one query per row when the table shows the AS.
    queryset = DirectPeeringSession.objects.select_related("autonomous_system")
    parent_object = BGPSession
    filter = DirectPeeringSessionFilterSet
    table = DirectPeeringSessionTable
    form = DirectPeeringSessionBulkEditForm
class DirectPeeringSessionDelete(PermissionRequiredMixin, DeleteView):
    """Delete a single DirectPeeringSession."""

    permission_required = "peering.delete_directpeeringsession"
    model = DirectPeeringSession
class DirectPeeringSessionDetails(DetailsView):
    """Display a single DirectPeeringSession."""

    permission_required = "peering.view_directpeeringsession"
    queryset = DirectPeeringSession.objects.all()

    def get_context(self, request, **kwargs):
        """Resolve the session from the URL kwargs for the template."""
        session = get_object_or_404(self.queryset, **kwargs)
        return {"instance": session, "active_tab": "main"}
class DirectPeeringSessionEdit(PermissionRequiredMixin, AddOrEditView):
    """Edit an existing DirectPeeringSession."""

    permission_required = "peering.change_directpeeringsession"
    model = DirectPeeringSession
    form = DirectPeeringSessionForm
    template = "peering/directpeeringsession/add_edit.html"
class DirectPeeringSessionList(PermissionRequiredMixin, ModelListView):
    """Filterable list view over all direct peering sessions."""

    permission_required = "peering.view_directpeeringsession"
    # Stable ordering: local AS, then remote AS, then session IP.
    queryset = DirectPeeringSession.objects.order_by(
        "local_autonomous_system", "autonomous_system", "ip_address"
    )
    table = DirectPeeringSessionTable
    filter = DirectPeeringSessionFilterSet
    filter_form = DirectPeeringSessionFilterForm
    template = "peering/directpeeringsession/list.html"
class EmailList(PermissionRequiredMixin, ModelListView):
    """Filterable list view over all Email templates."""

    permission_required = "peering.view_email"
    queryset = Email.objects.all()
    filter = EmailFilterSet
    filter_form = EmailFilterForm
    table = EmailTable
    template = "peering/email/list.html"
class EmailAdd(PermissionRequiredMixin, AddOrEditView):
    """Create a new Email template; redirects back to the email list."""

    permission_required = "peering.add_email"
    model = Email
    form = EmailForm
    template = "peering/email/add_edit.html"
    return_url = "peering:email_list"
class EmailDetails(DetailsView):
    """Display a single Email template."""

    permission_required = "peering.view_email"
    queryset = Email.objects.all()

    def get_context(self, request, **kwargs):
        """Resolve the Email from the URL kwargs for the template."""
        email = get_object_or_404(self.queryset, **kwargs)
        return {"instance": email, "active_tab": "main"}
class EmailEdit(PermissionRequiredMixin, AddOrEditView):
    """Edit an existing Email template."""

    permission_required = "peering.change_email"
    model = Email
    form = EmailForm
    template = "peering/email/add_edit.html"
class EmailDelete(PermissionRequiredMixin, DeleteView):
    """Delete a single Email template; redirects back to the email list."""

    permission_required = "peering.delete_email"
    model = Email
    return_url = "peering:email_list"
class EmailBulkDelete(PermissionRequiredMixin, BulkDeleteView):
permission_required = "peering.delete_email"
model = Email
filter = | |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This is an example of working with very large data. There are about
700,000 unduplicated donors in this database of Illinois political
campaign contributions.
With such a large set of input data, we cannot store all the comparisons
we need to make in memory. Instead, we will read the pairs on demand
from the MySQL database.
__Note:__ You will need to run `python mysql_init_db.py`
before running this script. See the annotates source for
[mysql_init_db.py](mysql_init_db.html)
For smaller datasets (<10,000), see our
[csv_example](csv_example.html)
"""
from __future__ import print_function
import os
import itertools
import time
import logging
import optparse
import locale
import pickle
import multiprocessing
import MySQLdb
import MySQLdb.cursors
import dedupe
import dedupe.backport
# ## Logging
# Dedupe uses Python logging to show or suppress verbose output. Added
# for convenience. To enable verbose output, run `python
# examples/mysql_example/mysql_example.py -v`
optp = optparse.OptionParser()
optp.add_option('-v', '--verbose', dest='verbose', action='count',
                help='Increase verbosity (specify multiple times for more)'
                )
(opts, args) = optp.parse_args()
# Map the number of -v flags to a log level: default WARNING,
# -v -> INFO, -vv (or more) -> DEBUG.
log_level = logging.WARNING
if opts.verbose :
    if opts.verbose == 1:
        log_level = logging.INFO
    elif opts.verbose >= 2:
        log_level = logging.DEBUG
logging.getLogger().setLevel(log_level)
# ## Setup
MYSQL_CNF = os.path.abspath('.') + '/mysql.cnf'  # MySQL client config (credentials)
settings_file = 'mysql_example_settings'         # learned dedupe model (binary)
training_file = 'mysql_example_training.json'    # labeled training pairs
start_time = time.time()
# You'll need to copy `examples/mysql_example/mysql.cnf_LOCAL` to
# `examples/mysql_example/mysql.cnf` and fill in your mysql database
# information in `examples/mysql_example/mysql.cnf`
# We use Server Side cursors (SSDictCursor and SSCursor) to [avoid
# having to have enormous result sets in memory](http://stackoverflow.com/questions/1808150/how-to-efficiently-use-mysqldb-sscursor).
# `con`/`c`: dict-row cursor, used for the main donor reads and DDL.
con = MySQLdb.connect(db='contributions',
                      charset='utf8',
                      read_default_file = MYSQL_CNF,
                      cursorclass=MySQLdb.cursors.SSDictCursor)
c = con.cursor()
c.execute("SET net_write_timeout = 3600")
# `con2`/`c2`: tuple-row cursor, used for the inverted-index field scans.
con2 = MySQLdb.connect(db='contributions',
                       charset='utf8',
                       read_default_file = MYSQL_CNF,
                       cursorclass=MySQLdb.cursors.SSCursor)
c2 = con2.cursor()
c2.execute("SET net_write_timeout = 3600")
# Increase max GROUP_CONCAT() length. The ability to concatenate long strings
# is needed a few times down below.
c.execute("SET group_concat_max_len = 10192")
# We'll be using variations on this following select statement to pull
# in campaign donor info.
#
# We did a fair amount of preprocessing of the fields in
# `mysql_init_db.py`
DONOR_SELECT = "SELECT donor_id, city, name, zip, state, address, " \
               "occupation, employer, person from processed_donors"
# ## Training
# Reuse a previously learned model if one exists; otherwise run the
# interactive active-learning loop and train a fresh one.
if os.path.exists(settings_file):
    print('reading from ', settings_file)
    with open(settings_file, 'rb') as sf :
        deduper = dedupe.StaticDedupe(sf, num_cores=4)
else:
    # Define the fields dedupe will pay attention to
    #
    # The address, city, and zip fields are often missing, so we'll
    # tell dedupe that, and we'll learn a model that takes that into
    # account
    fields = [{'field' : 'name', 'variable name' : 'name',
               'type': 'String'},
              {'field' : 'address', 'type': 'String',
               'variable name' : 'address', 'has missing' : True},
              {'field' : 'city', 'type': 'String', 'has missing' : True},
              {'field' : 'state', 'type': 'String', 'has missing': True},
              {'field' : 'zip', 'type': 'String', 'has missing' : True},
              {'field' : 'person', 'variable name' : 'person',
               'type' : 'Exists'},
              {'type' : 'Interaction',
               'interaction variables' : ['person', 'address']},
              {'type' : 'Interaction',
               'interaction variables' : ['name', 'address']}
              ]
    # Create a new deduper object and pass our data model to it.
    deduper = dedupe.Dedupe(fields, num_cores=4)
    # We will sample pairs from the entire donor table for training
    c.execute(DONOR_SELECT)
    temp_d = dict((i, row) for i, row in enumerate(c))
    deduper.sample(temp_d, 10000)
    # Free the in-memory donor copy as soon as sampling is done.
    del temp_d
    # If we have training data saved from a previous run of dedupe,
    # look for it and load it in.
    #
    # __Note:__ if you want to train from
    # scratch, delete the training_file
    if os.path.exists(training_file):
        print('reading labeled examples from ', training_file)
        with open(training_file) as tf :
            deduper.readTraining(tf)
    # ## Active learning
    print('starting active labeling...')
    # Starts the training loop. Dedupe will find the next pair of records
    # it is least certain about and ask you to label them as duplicates
    # or not.
    # use 'y', 'n' and 'u' keys to flag duplicates
    # press 'f' when you are finished
    dedupe.convenience.consoleLabel(deduper)
    # When finished, save our labeled, training pairs to disk
    with open(training_file, 'w') as tf:
        deduper.writeTraining(tf)
    # Notice our the argument here
    #
    # `recall` is the proportion of true dupes pairs that the learned
    # rules must cover. You may want to reduce this if your are making
    # too many blocks and too many comparisons.
    deduper.train(recall=0.90)
    with open(settings_file, 'wb') as sf:
        deduper.writeSettings(sf)
    # We can now remove some of the memory hogging objects we used
    # for training
    deduper.cleanupTraining()
## Blocking
print('blocking...')
# To run blocking on such a large set of data, we create a separate table
# that contains blocking keys and record ids
print('creating blocking_map database')
c.execute("DROP TABLE IF EXISTS blocking_map")
c.execute("CREATE TABLE blocking_map "
          "(block_key VARCHAR(200), donor_id INTEGER) "
          "CHARACTER SET utf8 COLLATE utf8_unicode_ci")
# If dedupe learned an Index Predicate, we have to take a pass
# through the data and create indices.
print('creating inverted index')
for field in deduper.blocker.index_fields :
    c2.execute("SELECT DISTINCT {field} FROM processed_donors "
               "WHERE {field} IS NOT NULL".format(field = field))
    # Stream distinct values straight from the server-side cursor.
    field_data = (row[0] for row in c2)
    deduper.blocker.index(field_data, field)
# Now we are ready to write our blocking map table by creating a
# generator that yields unique `(block_key, donor_id)` tuples.
print('writing blocking map')
c.execute(DONOR_SELECT)
full_data = ((row['donor_id'], row) for row in c)
b_data = deduper.blocker(full_data)
# MySQL has a hard limit on the size of a data object that can be
# passed to it. To get around this, we chunk the blocked data in
# to groups of 30,000 blocks
step_size = 30000
def dbWriter(sql, rows):
    """Bulk-insert ``rows`` with ``sql`` over a dedicated connection.

    Runs inside pool worker processes, so it must open (and close) its
    own MySQL connection rather than sharing the parent's.
    """
    worker_con = MySQLdb.connect(db='contributions',
                                 charset='utf8',
                                 read_default_file=MYSQL_CNF)
    worker_cur = worker_con.cursor()
    worker_cur.executemany(sql, rows)
    worker_cur.close()
    worker_con.commit()
    worker_con.close()
# Fan the INSERTs out to two writer processes.
pool = dedupe.backport.Pool(processes=2)
done = False
while not done :
    # Up to 100 chunks of `step_size` blocked pairs per outer iteration,
    # pulled lazily from the blocking generator.
    chunks = (list(itertools.islice(b_data, step)) for step in [step_size]*100)
    results = []
    for chunk in chunks :
        results.append(pool.apply_async(dbWriter,
                                        ("INSERT INTO blocking_map VALUES (%s, %s)",
                                         chunk)))
    for r in results :
        r.wait()
    # A short final chunk means `b_data` is exhausted.
    # NOTE(review): relies on `chunk` leaking out of the for-loop; this
    # would raise NameError if the generator ever yielded nothing.
    if len(chunk) < step_size :
        done = True
pool.close()
# Free up memory by removing indices we don't need anymore
deduper.blocker.resetIndices()
# Remove blocks that contain only one record, sort by block key and
# donor, key and index blocking map.
# These steps, particularly the sorting will let us quickly create
# blocks of data for comparison
print('prepare blocking table. this will probably take a while ...')
logging.info("indexing block_key")
c.execute("ALTER TABLE blocking_map "
          "ADD UNIQUE INDEX (block_key, donor_id)")
# Drop any leftovers from a previous run before rebuilding.
c.execute("DROP TABLE IF EXISTS plural_key")
c.execute("DROP TABLE IF EXISTS plural_block")
c.execute("DROP TABLE IF EXISTS covered_blocks")
c.execute("DROP TABLE IF EXISTS smaller_coverage")
# Many block_keys will only form blocks that contain a single
# record. Since there are no comparisons possible within such a
# singleton block we can ignore them.
#
# Additionally, if more than one block_key forms identical blocks
# we will only consider one of them.
logging.info("calculating plural_key")
c.execute("CREATE TABLE plural_key "
          "(block_key VARCHAR(200), "
          " block_id INTEGER UNSIGNED AUTO_INCREMENT, "
          " PRIMARY KEY (block_id)) "
          "(SELECT MIN(block_key) FROM "
          " (SELECT block_key, "
          " GROUP_CONCAT(donor_id ORDER BY donor_id) AS block "
          " FROM blocking_map "
          " GROUP BY block_key HAVING COUNT(*) > 1) AS blocks "
          " GROUP BY block)")
logging.info("creating block_key index")
c.execute("CREATE UNIQUE INDEX block_key_idx ON plural_key (block_key)")
# plural_block: the surviving (multi-record) blocks, keyed by block_id.
logging.info("calculating plural_block")
c.execute("CREATE TABLE plural_block "
          "(SELECT block_id, donor_id "
          " FROM blocking_map INNER JOIN plural_key "
          " USING (block_key))")
logging.info("adding donor_id index and sorting index")
c.execute("ALTER TABLE plural_block "
          "ADD INDEX (donor_id), "
          "ADD UNIQUE INDEX (block_id, donor_id)")
# To use Kolb, et.al's Redundant Free Comparison scheme, we need to
# keep track of all the block_ids that are associated with a
# particular donor records. We'll use MySQL's GROUP_CONCAT function to
# do this. This function will truncate very long lists of associated
# ids, so the maximum string length to try to was increased just after the
# connection was initialized at the top of this file to try to avoid this.
logging.info("creating covered_blocks")
c.execute("CREATE TABLE covered_blocks "
          "(SELECT donor_id, "
          " GROUP_CONCAT(block_id ORDER BY block_id) AS sorted_ids "
          " FROM plural_block "
          " GROUP BY donor_id)")
c.execute("CREATE UNIQUE INDEX donor_idx ON covered_blocks (donor_id)")
# In particular, for every block of records, we need to keep
# track of a donor records's associated block_ids that are SMALLER than
# the current block's id. Because we ordered the ids when we did the
# GROUP_CONCAT we can achieve this by using some string hacks.
logging.info("creating smaller_coverage")
c.execute("CREATE TABLE smaller_coverage "
          "(SELECT donor_id, block_id, "
          " TRIM(',' FROM SUBSTRING_INDEX(sorted_ids, block_id, 1)) AS smaller_ids "
          " FROM plural_block INNER JOIN covered_blocks "
          " USING (donor_id))")
con.commit()
## Clustering
def candidates_gen(result_set) :
lset = set
block_id = None
records = | |
# ap_perf/expression.py
from enum import Enum
import math
import numpy as np
import abc
# Type of the entity in the confusion matrix
class CM_Type(Enum):
    """Kinds of confusion-matrix quantities: the four cells, the four
    marginal sums (actual/predicted positives and negatives) and the total."""
    TP = 1   # true positives
    FP = 2   # false positives
    FN = 3   # false negatives
    TN = 4   # true negatives
    AP = 5   # actual positives (TP + FN)
    AN = 6   # actual negatives (TN + FP)
    PP = 7   # predicted positives (TP + FP)
    PN = 8   # predicted negatives (TN + FN)
    ALL = 9  # grand total of all cells
class CM_Category(Enum):
    """Which kind of aggregate a CM_Entity represents."""
    CELL = 1            # a single confusion-matrix cell (TP/FP/FN/TN)
    ACTUAL_SUM = 2      # a sum over actual labels (AP/AN)
    PREDICTION_SUM = 3  # a sum over predicted labels (PP/PN)
    ALL_SUM = 4         # the grand total (ALL)
# for storing constants
class ConstantOverPQ():
    """Holds the coefficients of the four P/Q cross terms plus a standalone
    constant (field names suggest the P·Q, P·Q0, P0·Q, P0·Q0 products —
    confirm against the compute_scaling implementations).

    A field left as ``None`` means "no such term".  Supports scalar
    multiplication/division and term-wise addition, where ``None`` behaves
    as an absent term that is never materialized.
    """

    # The five coefficient slots, iterated by __mul__.
    _FIELDS = ("cPQ", "cPQ0", "cP0Q", "cP0Q0", "c")

    def __init__(self):
        self.cPQ = None
        self.cPQ0 = None
        self.cP0Q = None
        self.cP0Q0 = None
        self.c = None

    def is_constant(self):
        """True when only the standalone constant term is present."""
        # Simplified from an if/else returning True/False explicitly.
        return (self.cPQ is None and self.cPQ0 is None and self.cP0Q is None
                and self.cP0Q0 is None and self.c is not None)

    def __mul__(self, other):
        """Scale every present term by the scalar ``other``."""
        result = ConstantOverPQ()
        for name in self._FIELDS:
            value = getattr(self, name)
            if value is not None:
                setattr(result, name, other * value)
        return result

    @staticmethod
    def add_none(x, y):
        """Add two optional values; a ``None`` operand is ignored.

        Returns ``None`` only when both operands are ``None``.
        """
        if x is None:
            return y
        if y is None:
            return x
        return x + y

    def __add__(self, other):
        """Term-wise sum of two coefficient containers."""
        result = ConstantOverPQ()
        result.cPQ = ConstantOverPQ.add_none(self.cPQ, other.cPQ)
        result.cPQ0 = ConstantOverPQ.add_none(self.cPQ0, other.cPQ0)
        result.cP0Q = ConstantOverPQ.add_none(self.cP0Q, other.cP0Q)
        result.cP0Q0 = ConstantOverPQ.add_none(self.cP0Q0, other.cP0Q0)
        result.c = ConstantOverPQ.add_none(self.c, other.c)
        return result

    def __rmul__(self, other):
        return self.__mul__(other)

    def __truediv__(self, other):
        # Division by a scalar is multiplication by its reciprocal.
        return self.__mul__(1 / other)
# exception
class OperationUndefined(Exception):
    """Raised when an operator receives operand types it cannot combine."""
    pass
class UnsupportedMetric(Exception):
    """Raised for metrics the library cannot handle (raised by callers
    elsewhere; not raised within this module chunk)."""
    pass
class InputMismatch(Exception):
    """Raised on inconsistent inputs (raised by callers elsewhere; not
    raised within this module chunk)."""
    pass
# Entity in the confusion matrix
class CM_Entity:
    """A single confusion-matrix quantity (cell, marginal sum or total).

    Arithmetic operators build symbolic ``CM_Expression`` trees.  As a
    special case, adding two complementary cells collapses directly into
    the corresponding marginal entity (e.g. TP + FP -> PP) instead of an
    expression node.

    Fixes over the previous revision: ``__ge__`` reported " ** operation"
    in its error message (copy-paste from ``__pow__``), and every raise
    message was missing the space before "is undefined".
    """

    def __init__(self, _type, category = CM_Category.CELL):
        self._type = _type        # a CM_Type member
        self.category = category  # a CM_Category member

    def __pos__(self):
        return EXPR_UnaryEntity(self, 1)

    def __neg__(self):
        return EXPR_UnaryEntity(self, -1)

    def __add__(self, other):
        if isinstance(other, CM_Entity):
            # Complementary cell pairs collapse to their marginal sums;
            # marginal pairs collapse to the grand total.
            if (self._type == CM_Type.TP and other._type == CM_Type.FP) or \
               (self._type == CM_Type.FP and other._type == CM_Type.TP):
                return CM_Entity(CM_Type.PP, CM_Category.PREDICTION_SUM)
            elif (self._type == CM_Type.TP and other._type == CM_Type.FN) or \
                 (self._type == CM_Type.FN and other._type == CM_Type.TP):
                return CM_Entity(CM_Type.AP, CM_Category.ACTUAL_SUM)
            elif (self._type == CM_Type.TN and other._type == CM_Type.FN) or \
                 (self._type == CM_Type.FN and other._type == CM_Type.TN):
                return CM_Entity(CM_Type.PN, CM_Category.PREDICTION_SUM)
            elif (self._type == CM_Type.TN and other._type == CM_Type.FP) or \
                 (self._type == CM_Type.FP and other._type == CM_Type.TN):
                return CM_Entity(CM_Type.AN, CM_Category.ACTUAL_SUM)
            elif (self._type == CM_Type.AP and other._type == CM_Type.AN) or \
                 (self._type == CM_Type.AN and other._type == CM_Type.AP) or \
                 (self._type == CM_Type.PP and other._type == CM_Type.PN) or \
                 (self._type == CM_Type.PN and other._type == CM_Type.PP):
                return CM_Entity(CM_Type.ALL, CM_Category.ALL_SUM)
            else:
                return EXPR_Addition(EXPR_UnaryEntity(self), EXPR_UnaryEntity(other))
        else:
            return EXPR_Addition(EXPR_UnaryEntity(self), other)

    def __sub__(self, other):
        return self.__add__(-other)

    def __radd__(self, other):
        if (type(other) == int or type(other) == float):
            return EXPR_UnaryIdentity(other) + self
        else:
            return self + other

    def __rsub__(self, other):
        if (type(other) == int or type(other) == float):
            return EXPR_UnaryIdentity(other) - self
        else:
            return -self + other

    def __mul__(self, other):
        if (type(other) == int or type(other) == float):
            return EXPR_UnaryEntity(self, other)
        elif isinstance(other, CM_Expression):
            return EXPR_Multiplication(EXPR_UnaryEntity(self), other)
        elif isinstance(other, CM_Entity):
            return EXPR_Multiplication(EXPR_UnaryEntity(self), EXPR_UnaryEntity(other))
        else:
            raise OperationUndefined(" * operation over " + str(type(self)) +
                                     " and " + str(type(other)) + " is undefined")

    def __truediv__(self, other):
        if (type(other) == int or type(other) == float):
            return EXPR_UnaryEntity(self, 1/other)
        elif isinstance(other, CM_Expression):
            return EXPR_Fraction(EXPR_UnaryEntity(self), other)
        elif isinstance(other, CM_Entity):
            return EXPR_Fraction(EXPR_UnaryEntity(self), EXPR_UnaryEntity(other))
        else:
            raise OperationUndefined(" / operation over " + str(type(self)) +
                                     " and " + str(type(other)) + " is undefined")

    def __rmul__(self, other):
        return self.__mul__(other)

    def __pow__(self, other):
        # Only numeric exponents are supported.
        if (type(other) == int or type(other) == float):
            return EXPR_Power(EXPR_UnaryEntity(self), other)
        else:
            raise OperationUndefined(" ** operation over " + str(type(self)) +
                                     " and " + str(type(other)) + " is undefined")

    def __ge__(self, other):
        # `entity >= number` builds a constraint node.
        if (type(other) == int or type(other) == float):
            return EXPR_Constraint(EXPR_UnaryEntity(self), other)
        else:
            raise OperationUndefined(" >= operation over " + str(type(self)) +
                                     " and " + str(type(other)) + " is undefined")

    def __repr__(self):
        return str(self._type.name)
# Expression that combines entities
class CM_Expression:
    """Base class for symbolic expressions over confusion-matrix entities.

    Subclasses set the bookkeeping flags below and implement
    ``compute_value`` / ``compute_scaling``.  Operator overloads build
    larger expression trees (EXPR_* node classes defined in this module).

    Fixes over the previous revision: ``__ge__`` reported " ** operation"
    in its error message (copy-paste from ``__pow__``), and the raise
    messages were missing the space before "is undefined".
    """

    def __init__(self):
        # default values for expression
        self.is_linear_tp_tn = True  # is it linear w.r.t TP and TN
        self.depends_cell_cm = False  # does it depend on the cell of confusion matrix: tp, tn, fp, fn
        self.depends_actual_sum = False  # does it depend on actual sum statistics: AP & AN
        self.depends_predicted_sum = False  # does it depend on predicted sum statistics: PP & PN
        self.is_constant = False  # does it contain only numbers or {ALL} entity
        self.needs_adv_sum_marg = False  # does it need 'sum'-marginal of the adversary to compute
        self.needs_pred_sum_marg = False  # does it need 'sum'-marginal of the predictor to compute
        self.is_constraint = False  # is it a constraint (contains '>=')

    def __neg__(self):
        return EXPR_UnaryExpr(self, -1)

    def __add__(self, other):
        # Numbers and bare entities are wrapped into unary nodes first.
        if (type(other) == int or type(other) == float):
            return EXPR_Addition(self, EXPR_UnaryIdentity(other))
        elif isinstance(other, CM_Entity):
            return EXPR_Addition(self, EXPR_UnaryEntity(other))
        else:
            return EXPR_Addition(self, other)

    def __sub__(self, other):
        return self.__add__(-other)

    def __radd__(self, other):
        if (type(other) == int or type(other) == float):
            return EXPR_UnaryIdentity(other) + self
        else:
            return self + other

    def __rsub__(self, other):
        if (type(other) == int or type(other) == float):
            return EXPR_UnaryIdentity(other) - self
        else:
            return -self + other

    def __mul__(self, other):
        if (type(other) == int or type(other) == float):
            return EXPR_UnaryExpr(self, other)
        elif isinstance(other, CM_Expression):
            return EXPR_Multiplication(self, other)
        elif isinstance(other, CM_Entity):
            return EXPR_Multiplication(self, EXPR_UnaryEntity(other))
        else:
            raise OperationUndefined(" * operation over " + str(type(self)) +
                                     " and " + str(type(other)) + " is undefined")

    def __truediv__(self, other):
        if (type(other) == int or type(other) == float):
            return EXPR_UnaryExpr(self, 1/other)
        elif isinstance(other, CM_Expression):
            return EXPR_Fraction(self, other)
        elif isinstance(other, CM_Entity):
            return EXPR_Fraction(self, EXPR_UnaryEntity(other))
        else:
            raise OperationUndefined(" / operation over " + str(type(self)) +
                                     " and " + str(type(other)) + " is undefined")

    def __rmul__(self, other):
        return self.__mul__(other)

    def __pow__(self, other):
        if (type(other) == int or type(other) == float):
            return EXPR_Power(self, other)
        else:
            raise OperationUndefined(" ** operation over " + str(type(self)) +
                                     " and " + str(type(other)) + " is undefined")

    def __ge__(self, other):
        # `expr >= number` builds a constraint node; constraints must not nest.
        if (type(other) == int or type(other) == float):
            if self.is_constraint == False:
                return EXPR_Constraint(self, other)
            else:
                raise OperationUndefined("Left hand side of '>=' operator must not contains '>='")
        else:
            raise OperationUndefined(" >= operation over " + str(type(self)) +
                                     " and " + str(type(other)) + " is undefined")

    def info(self):
        """Print the bookkeeping flags (debug helper)."""
        print("is_linear_tp_tn : ", self.is_linear_tp_tn)
        print("depends_cell_cm : ", self.depends_cell_cm)
        print("depends_actual_sum : ", self.depends_actual_sum)
        print("depends_predicted_sum : ", self.depends_predicted_sum)
        print("is_constant : ", self.is_constant)
        print("needs_adv_sum_marg : ", self.needs_adv_sum_marg)
        print("needs_pred_sum_marg : ", self.needs_pred_sum_marg)

    @abc.abstractmethod
    def compute_value(self, C_val):
        """Evaluate this expression against concrete confusion-matrix values."""
        pass

    @abc.abstractmethod
    def compute_scaling(self, m, info):
        # m is n + 1, since the index are: 0,...,n
        # should return an instance of ConstantOverPQ
        pass
class EXPR_UnaryEntity(CM_Expression):
def __init__(self, entity, multiplier = 1):
self.entity = entity
self.multiplier = multiplier
super().__init__()
if self.entity.category == CM_Category.CELL:
self.depends_cell_cm = True
elif self.entity.category == CM_Category.ACTUAL_SUM:
self.depends_actual_sum = True
elif self.entity.category == CM_Category.PREDICTION_SUM:
self.depends_predicted_sum = True
elif self.entity.category == CM_Category.ALL_SUM:
self.is_constant = True
def __pos__(self):
return EXPR_UnaryEntity(self.entity, self.multiplier)
def __neg__(self):
return EXPR_UnaryEntity(self.entity, -self.multiplier)
def __mul__(self, other):
if (type(other) == int or type(other) == float):
return EXPR_UnaryEntity(self.entity, self.multiplier * other)
elif isinstance(other, CM_Expression):
return EXPR_Multiplication(self, other)
elif isinstance(other, CM_Entity):
return EXPR_Multiplication(self, EXPR_UnaryEntity(other))
else:
raise OperationUndefined(" * operation over " + str(type(self)) +
" and " + str(type(other)) + "is undefined")
def __truediv__(self, other):
if (type(other) == int or type(other) == float):
return EXPR_UnaryEntity(self.entity, self.multiplier / other)
elif isinstance(other, CM_Expression):
return EXPR_Fraction(self, other)
elif isinstance(other, CM_Entity):
return EXPR_Fraction(self, EXPR_UnaryEntity(other))
else:
raise OperationUndefined(" * operation over " + str(type(self)) +
" and " + str(type(other)) + "is undefined")
def __rmul__(self, other):
return self.__mul__(other)
def __repr__(self):
if self.multiplier == 1:
return str(self.entity)
elif self.multiplier == -1:
return "-" + str(self.entity)
else:
return str(self.multiplier) + " " + str(self.entity)
def compute_value(self, C_val):
if self.entity._type == CM_Type.TP:
return self.multiplier * C_val.tp
elif self.entity._type == CM_Type.FP:
return self.multiplier * C_val.fp
elif self.entity._type == CM_Type.FN:
return self.multiplier * C_val.fn
elif self.entity._type == CM_Type.TN:
return self.multiplier * C_val.tn
elif self.entity._type == CM_Type.AP:
return self.multiplier * C_val.ap
elif self.entity._type | |
import pandas as pd
import numpy as np
import math
import os
from scipy.interpolate import interp1d
import time
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
from information_measures import *
from joblib import Parallel, delayed
#from arch import arch_model
def rmspe(y_true, y_pred):
    """Root mean squared percentage error between actual and predicted values."""
    pct_error = (y_true - y_pred) / y_true
    return np.sqrt(np.mean(pct_error ** 2))
def log_return(list_stock_prices):  # Stock prices are estimated through wap values
    """Log returns of a price series; the first element comes back NaN."""
    log_prices = np.log(list_stock_prices)
    return log_prices.diff()
def realized_volatility(series_log_return):
    """Realized volatility: square root of the sum of squared log returns."""
    total_variance = np.sum(series_log_return ** 2)
    return np.sqrt(total_variance)
def compute_wap(book_pd):
    """Weighted average price from the top-of-book quotes.

    Each side's price is weighted by the opposite side's size, so the WAP
    leans toward the side with less resting liquidity.
    """
    numerator = (book_pd['bid_price1'] * book_pd['ask_size1']
                 + book_pd['ask_price1'] * book_pd['bid_size1'])
    denominator = book_pd['bid_size1'] + book_pd['ask_size1']
    return numerator / denominator
def realized_volatility_from_book_pd(book_stock_time):
    """Realized volatility of the WAP series of one order-book slice."""
    log_returns = log_return(compute_wap(book_stock_time))
    return realized_volatility(log_returns)
def realized_volatility_per_time_id(file_path, prediction_column_name):
    """Per-time_id realized volatility for a single stock's book file.

    Returns a DataFrame with columns ``['row_id', prediction_column_name]``
    where ``row_id`` is ``'<stock_id>-<time_id>'``.
    """
    book = pd.read_parquet(file_path)
    # Estimate the stock price at every book update.
    book['wap'] = compute_wap(book)
    # Log returns within each time_id bucket; the per-bucket leading NaN
    # rows are dropped before aggregating.
    book['log_return'] = book.groupby(['time_id'])['wap'].apply(log_return)
    book = book[~book['log_return'].isnull()]
    # Square, sum and root the returns to get realized volatility.
    vol = pd.DataFrame(
        book.groupby(['time_id'])['log_return'].agg(realized_volatility)
    ).reset_index()
    vol = vol.rename(columns={'log_return': prediction_column_name})
    # The stock id is encoded in the partitioned path: .../stock_id=<id>
    stock_id = file_path.split('=')[1]
    vol['row_id'] = vol['time_id'].apply(lambda x: f'{stock_id}-{x}')
    return vol[['row_id', prediction_column_name]]
def past_realized_volatility_per_stock(list_file, prediction_column_name):
    """Concatenate per-time_id realized volatilities across all stock files.

    Builds one frame per file and concatenates once at the end; the
    previous version called ``pd.concat`` inside the loop, which copies
    the accumulated frame on every iteration (quadratic).  An empty
    ``list_file`` still yields an empty DataFrame, as before.
    """
    frames = [
        realized_volatility_per_time_id(file, prediction_column_name)
        for file in list_file
    ]
    return pd.concat(frames) if frames else pd.DataFrame()
def stupidForestPrediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test):
    """Baseline: fit a random forest on the naive realized-volatility feature
    and rewrite the test predictions with the model output."""
    naive_predictions_train = past_realized_volatility_per_stock(list_file=book_path_train,prediction_column_name=prediction_column_name)
    # NOTE(review): the merge hard-codes column 'pred', so this only works
    # when prediction_column_name == 'pred' — confirm with callers.
    df_joined_train = train_targets_pd.merge(naive_predictions_train[['row_id','pred']], on = ['row_id'], how = 'left')
    # Single-feature regression: naive volatility -> target volatility.
    X = np.array(df_joined_train['pred']).reshape(-1,1)
    y = np.array(df_joined_train['target']).reshape(-1,)
    regr = RandomForestRegressor(random_state=0)
    regr.fit(X, y)
    naive_predictions_test = past_realized_volatility_per_stock(list_file=book_path_test,prediction_column_name='target')
    yhat = regr.predict(np.array(naive_predictions_test['target']).reshape(-1,1))
    # Replace the naive values with the model's predictions, keeping row_ids.
    updated_predictions = naive_predictions_test.copy()
    updated_predictions['target'] = yhat
    return updated_predictions
def garch_fit_predict_volatility(returns_series, N=10000):
    """Fit a GARCH(1,1) on scaled returns and forecast 600-step volatility.

    Returns are multiplied by N before fitting (and the result divided
    back) for numerical stability of the optimizer.

    NOTE(review): `arch_model` comes from `arch`, whose import is
    commented out at the top of this file — calling this as-is raises
    NameError until `from arch import arch_model` is restored.
    """
    model = arch_model(returns_series * N, p=1, q=1)
    model_fit = model.fit(update_freq=0, disp='off')
    yhat = model_fit.forecast(horizon=600, reindex=False)
    pred_volatility = np.sqrt(np.sum(yhat.variance.values)) / N
    return pred_volatility
def garch_volatility_per_time_id(file_path, prediction_column_name):
    """Per-time_id GARCH volatility forecast for one stock's book file.

    Returns a DataFrame with columns ['row_id', prediction_column_name].
    """
    # read the data
    df_book_data = pd.read_parquet(file_path)
    # calculate the midprice (not the WAP)
    df_book_data['midprice'] =(df_book_data['bid_price1'] + df_book_data['ask_price1'])/2
    # leave only the midprice for now
    df_book_data = df_book_data[['time_id', 'seconds_in_bucket', 'midprice']]
    df_book_data = df_book_data.sort_values('seconds_in_bucket')
    # make the book updates evenly spaced: one row per (time_id, second)
    # over the full 600-second bucket.
    df_book_data_evenly = pd.DataFrame({'time_id':np.repeat(df_book_data['time_id'].unique(), 600),
                                        'second':np.tile(range(0,600), df_book_data['time_id'].nunique())})
    df_book_data_evenly['second'] = df_book_data_evenly['second'].astype(np.int16)
    df_book_data_evenly = df_book_data_evenly.sort_values('second')
    # merge_asof forward-fills each second with the latest book update.
    df_book_data_evenly = pd.merge_asof(df_book_data_evenly,
                                        df_book_data,
                                        left_on='second',right_on='seconds_in_bucket',
                                        by = 'time_id')
    # Ordering for easier use
    df_book_data_evenly = df_book_data_evenly[['time_id', 'second', 'midprice']]
    df_book_data_evenly = df_book_data_evenly.sort_values(['time_id','second']).reset_index(drop=True)
    # calculate log returns
    df_book_data_evenly['log_return'] = df_book_data_evenly.groupby(['time_id'])['midprice'].apply(log_return)
    df_book_data_evenly = df_book_data_evenly[~df_book_data_evenly['log_return'].isnull()]
    # fit GARCH(1, 1) and predict the volatility of returns
    df_garch_vol_per_stock = \
        pd.DataFrame(df_book_data_evenly.groupby(['time_id'])['log_return'].agg(garch_fit_predict_volatility)).reset_index()
    df_garch_vol_per_stock = df_garch_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
    # add row_id column to the data; stock id comes from the partitioned path
    stock_id = file_path.split('=')[1]
    df_garch_vol_per_stock['row_id'] = df_garch_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
    # return the result
    return df_garch_vol_per_stock[['row_id', prediction_column_name]]
def garch_volatility_per_stock(list_file, prediction_column_name):
    """Concatenate per-time_id GARCH volatility forecasts across files.

    Builds one frame per file and concatenates once at the end; the
    previous version concatenated inside the loop, copying the
    accumulated frame each iteration (quadratic).  An empty ``list_file``
    still yields an empty DataFrame, as before.
    """
    frames = [
        garch_volatility_per_time_id(file, prediction_column_name)
        for file in list_file
    ]
    return pd.concat(frames) if frames else pd.DataFrame()
def entropy_from_book(book_stock_time, last_min):
    """Sample entropy of the 1-second resampled WAP over the trailing window.

    When ``last_min < 10`` the book is restricted to the final
    ``last_min`` minutes of the 600-second bucket; returns 0 when too few
    rows remain to estimate entropy.
    """
    if last_min < 10:
        cutoff = 600 - last_min * 60
        book_stock_time = book_stock_time[
            book_stock_time['seconds_in_bucket'] >= cutoff
        ]
        if book_stock_time.empty == True or book_stock_time.shape[0] < 3:
            return 0
    wap = compute_wap(book_stock_time)
    t_init = book_stock_time['seconds_in_bucket']
    # Nearest-neighbour resampling: step-wise WAP on a 1-second grid.
    grid = np.arange(np.min(t_init), np.max(t_init))
    step_wap = interp1d(t_init, wap, kind='nearest')(grid)
    # Compute sample entropy
    return sampen(step_wap)
def entropy_from_wap(wap, seconds, last_seconds):
    """Sample entropy of a WAP array resampled to 1-second steps.

    When ``last_seconds < 600`` only samples with ``seconds >=
    last_seconds`` are kept; returns 0 when fewer than 3 samples remain.
    """
    if last_seconds < 600:
        keep = np.where(seconds >= last_seconds)[0]
        if len(keep) < 3:
            # Too few points left to estimate entropy.
            return 0
        wap = wap[keep]
        seconds = seconds[keep]
    # Nearest-neighbour resampling: step-wise WAP on a 1-second grid.
    grid = np.arange(np.min(seconds), np.max(seconds))
    resampled = interp1d(seconds, wap, kind='nearest')(grid)
    # Compute sample entropy
    return sampen(resampled)
def linearFit(book_stock_time, last_min):
    """Average WAP slope (price change per second) over the last `last_min` minutes.

    :param book_stock_time: order-book rows for one (stock, time_id) bucket.
    :param last_min: lookback window in minutes (10 = whole bucket).
    :return: (last_wap - first_wap) / elapsed_seconds; 0 when fewer than 2 rows
        remain in the window or when the window spans zero seconds.
    """
    if last_min < 10:
        book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600 - last_min * 60)]
        # idiom fix: `empty == True` -> `empty`
        if book_stock_time.empty or book_stock_time.shape[0] < 2:
            return 0
    wap = np.array(compute_wap(book_stock_time))
    t_init = book_stock_time['seconds_in_bucket']
    span = np.max(t_init) - np.min(t_init)
    if span == 0:
        # all remaining rows share a single timestamp: slope is undefined,
        # previously this divided by zero (inf/nan with a numpy warning)
        return 0
    return (wap[-1] - wap[0]) / span
def wapStat(book_stock_time, last_min):
    """Standard deviation of the WAP resampled to a 1-second grid.

    Looks only at the trailing `last_min` minutes of the bucket; returns 0
    when fewer than 2 rows survive the window filter.
    """
    if last_min < 10:
        cutoff = 600 - last_min * 60
        book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= cutoff]
        if book_stock_time.empty or len(book_stock_time) < 2:
            return 0
    wap = compute_wap(book_stock_time)
    timestamps = book_stock_time['seconds_in_bucket']
    grid = np.arange(np.min(timestamps), np.max(timestamps))
    # WAP holds its value between book updates, hence nearest-neighbour resampling
    resampled = interp1d(timestamps, wap, kind='nearest')(grid)
    return np.std(resampled)
def entropy_Prediction(book_path_train, prediction_column_name, train_targets_pd, book_path_test, all_stocks_ids, test_file):
    """Train a default CatBoost regressor on entropy/book features and predict test targets.

    :return: `test_file` with a 'target' column of model predictions appended.
    """
    # Feature engineering for both splits
    feats_test = computeFeatures_1(book_path_test, 'test', test_file, all_stocks_ids)
    feats_train = computeFeatures_1(book_path_train, 'train', train_targets_pd, all_stocks_ids)
    # Fit on everything except the identifiers and the label itself
    model = CatBoostRegressor(verbose=0)
    model.fit(feats_train.drop(['row_id', 'target', 'stock_id'], axis=1),
              feats_train['target'])
    # Predict on the test features and glue the forecasts onto the test rows
    yhat = model.predict(feats_test.drop(['row_id', 'stock_id'], axis=1))
    yhat_frame = pd.DataFrame(yhat, columns=['target'])
    return pd.concat([test_file, yhat_frame], axis=1)
def computeFeatures_1(book_path,prediction_column_name,train_targets_pd,all_stocks_ids):
    """Build the per-(stock, time_id) feature matrix used by entropy_Prediction.

    :param book_path: iterable of per-stock book parquet paths ('.../stock_id=N').
    :param prediction_column_name: not referenced in this body (callers pass
        'train'/'test') -- presumably kept for signature symmetry; confirm.
    :param train_targets_pd: DataFrame with a 'row_id' column (plus 'target'
        for training data) that the features are merged against.
    :param all_stocks_ids: array of stock ids defining the one-hot encoding order.
    :return: features merged with targets plus one-hot stock-id columns.
    """
    book_all_features = pd.DataFrame()
    # identity-matrix rows serve as one-hot encodings, one row per known stock
    encoder = np.eye(len(all_stocks_ids))
    stocks_id_list, row_id_list = [], []
    volatility_list, entropy2_list = [], []
    linearFit_list, linearFit5_list, linearFit2_list = [], [], []
    wap_std_list, wap_std5_list, wap_std2_list = [], [], []
    for file in book_path:
        start = time.time()
        book_stock = pd.read_parquet(file)
        # stock id is encoded in the parquet partition path: .../stock_id=N
        stock_id = file.split('=')[1]
        print('stock id computing = ' + str(stock_id))
        stock_time_ids = book_stock['time_id'].unique()
        for time_id in stock_time_ids:
            # Access book data at this time + stock
            book_stock_time = book_stock[book_stock['time_id'] == time_id]
            # Create feature matrix: realized volatility, 2-minute entropy,
            # and linear-fit slope / WAP std over 10/5/2-minute windows
            stocks_id_list.append(stock_id)
            row_id_list.append(str(f'{stock_id}-{time_id}'))
            volatility_list.append(realized_volatility_from_book_pd(book_stock_time=book_stock_time))
            entropy2_list.append(entropy_from_book(book_stock_time=book_stock_time,last_min=2))
            linearFit_list.append(linearFit(book_stock_time=book_stock_time,last_min=10))
            linearFit5_list.append(linearFit(book_stock_time=book_stock_time,last_min=5))
            linearFit2_list.append(linearFit(book_stock_time=book_stock_time,last_min=2))
            wap_std_list.append(wapStat(book_stock_time=book_stock_time,last_min=10))
            wap_std5_list.append(wapStat(book_stock_time=book_stock_time,last_min=5))
            wap_std2_list.append(wapStat(book_stock_time=book_stock_time,last_min=2))
        print('Computing one stock entropy took', time.time() - start, 'seconds for stock ', stock_id)
    # Merge targets
    stocks_id_pd = pd.DataFrame(stocks_id_list,columns=['stock_id'])
    row_id_pd = pd.DataFrame(row_id_list,columns=['row_id'])
    volatility_pd = pd.DataFrame(volatility_list,columns=['volatility'])
    entropy2_pd = pd.DataFrame(entropy2_list,columns=['entropy2'])
    linearFit_pd = pd.DataFrame(linearFit_list,columns=['linearFit_coef'])
    linearFit5_pd = pd.DataFrame(linearFit5_list,columns=['linearFit_coef5'])
    linearFit2_pd = pd.DataFrame(linearFit2_list,columns=['linearFit_coef2'])
    wap_std_pd = pd.DataFrame(wap_std_list,columns=['wap_std'])
    wap_std5_pd = pd.DataFrame(wap_std5_list,columns=['wap_std5'])
    wap_std2_pd = pd.DataFrame(wap_std2_list,columns=['wap_std2'])
    book_all_features = pd.concat([stocks_id_pd,row_id_pd,volatility_pd,entropy2_pd,linearFit_pd,linearFit5_pd,linearFit2_pd,
                                   wap_std_pd,wap_std5_pd,wap_std2_pd],axis=1)
    # This line makes sure the predictions are aligned with the row_id in the submission file
    book_all_features = train_targets_pd.merge(book_all_features, on = ['row_id'])
    # Add encoded stock: look up each row's stock id in all_stocks_ids and
    # take the matching one-hot row from the identity matrix
    encoded = list()
    for i in range(book_all_features.shape[0]):
        stock_id = book_all_features['stock_id'][i]
        encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
        encoded.append(encoded_stock)
    encoded_pd = pd.DataFrame(np.array(encoded).reshape(book_all_features.shape[0],np.array(all_stocks_ids).shape[0]))
    book_all_features_encoded = pd.concat([book_all_features, encoded_pd],axis=1)
    return book_all_features_encoded
def calc_wap(df):
    """Level-1 weighted average price: each side's price weighted by the opposite side's size."""
    numerator = df['bid_price1'] * df['ask_size1'] + df['ask_price1'] * df['bid_size1']
    return numerator / (df['bid_size1'] + df['ask_size1'])
def calc_wap2(df):
    """Level-2 weighted average price (prices weighted by the opposite side's level-2 size)."""
    weighted = df['bid_price2'] * df['ask_size2'] + df['ask_price2'] * df['bid_size2']
    total_size = df['bid_size2'] + df['ask_size2']
    return weighted / total_size
def calc_wap3(df):
    """Level-2 size-weighted mean price (each side's price weighted by its OWN size)."""
    own_weighted = df['bid_price2'] * df['bid_size2'] + df['ask_price2'] * df['ask_size2']
    return own_weighted / (df['bid_size2'] + df['ask_size2'])
def calc_wap4(df):
    """Level-1 size-weighted mean price (each side's price weighted by its OWN size)."""
    own_weighted = df['bid_price1'] * df['bid_size1'] + df['ask_price1'] * df['ask_size1']
    return own_weighted / (df['bid_size1'] + df['ask_size1'])
def mid_price(df):
    """Level-1 mid price: the average of the best bid and best ask."""
    half_bid = df['bid_price1'] / 2
    half_ask = df['ask_price1'] / 2
    return half_bid + half_ask
def calc_rv_from_wap_numba(values, index):
    """Realized volatility: sqrt of the sum of squared log-returns of `values`.

    The first log-return is discarded (matching the original usage);
    `index` matches the pandas raw-apply callback signature and is unused.
    """
    log_returns = np.diff(np.log(values))[1:]
    return np.sqrt(np.square(log_returns).sum())
def load_book_data_by_id(stock_id, datapath, train_test):
    """Read the order-book parquet partition for one stock.

    :param stock_id: numeric stock id used in the partition directory name.
    :param datapath: dataset root directory.
    :param train_test: 'train' or 'test' -- selects book_train/book_test.
    """
    partition = os.path.join(datapath,
                             f'book_{train_test}.parquet',
                             f'stock_id={stock_id}')
    return pd.read_parquet(partition)
def load_trades_data_by_id(stock_id, datapath, train_test):
    """Read the trades parquet partition for one stock.

    :param train_test: 'train' or 'test' -- selects trade_train/trade_test.
    """
    partition = os.path.join(datapath,
                             f'trade_{train_test}.parquet',
                             f'stock_id={stock_id}')
    return pd.read_parquet(partition)
def entropy_from_df(df):
    """Sample entropy of the 'wap' column after 1-second resampling; 0 if under 3 rows."""
    if df.shape[0] < 3:
        return 0
    seconds = df['seconds_in_bucket']
    grid = np.arange(np.min(seconds), np.max(seconds))
    # nearest-neighbour: the WAP is constant between book updates
    resampled = interp1d(seconds, df['wap'], kind='nearest')(grid)
    return sampen(resampled)
def entropy_from_df2(df):
    """Sample entropy of the level-2 WAP ('wap2'); 0 when fewer than 3 rows."""
    if len(df) < 3:
        return 0
    t = df['seconds_in_bucket']
    nearest = interp1d(t, df['wap2'], kind='nearest')
    # resample onto a uniform 1-second grid before measuring entropy
    resampled = nearest(np.arange(np.min(t), np.max(t)))
    return sampen(resampled)
def entropy_from_df3(df):
    """Sample entropy of the 'wap3' series; 0 when fewer than 3 rows."""
    if len(df) < 3:
        return 0
    t = df['seconds_in_bucket']
    one_second_grid = np.arange(np.min(t), np.max(t))
    # WAP is piecewise constant, so nearest-neighbour resampling is appropriate
    resampled = interp1d(t, df['wap3'], kind='nearest')(one_second_grid)
    return sampen(resampled)
def financial_metrics(df):
    """Bucket-level liquidity summaries, each averaged over all rows.

    :return: list of six means -- WAP imbalance (wap - wap2), relative level-1
        price spread, bid-side spread, ask-side spread, total quoted volume,
        and absolute ask/bid volume imbalance.
    """
    ask_volume = df['ask_size1'] + df['ask_size2']
    bid_volume = df['bid_size1'] + df['bid_size2']
    return [
        np.mean(df['wap'] - df['wap2']),
        np.mean((df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1']) / 2)),
        np.mean(df['bid_price1'] - df['bid_price2']),
        np.mean(df['ask_price1'] - df['ask_price2']),  # abs() intentionally NOT taken, matching original
        np.mean(ask_volume + bid_volume),
        np.mean(abs(ask_volume - bid_volume)),
    ]
def financial_metrics_2(df):
wap_imbalance = df['wap'] - df['wap2']
price_spread = (df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1'])/2)
bid_spread = df['bid_price1'] - df['bid_price2']
ask_spread = df['ask_price1'] - df['ask_price2'] # Abs to take
total_volume = (df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2'])
volume_imbalance = abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2']))
# New features here
wap_imbalance_mean = np.mean(wap_imbalance)
wap_imbalance_sum = np.sum(wap_imbalance)
wap_imbalance_std = np.std(wap_imbalance)
wap_imbalance_max = np.max(wap_imbalance)
wap_imbalance_min = np.min(wap_imbalance)
price_spread_mean = np.mean(price_spread)
price_spread_sum = np.sum(price_spread)
price_spread_std = np.std(price_spread)
price_spread_max = np.max(price_spread)
price_spread_min = np.min(price_spread)
bid_spread_mean = np.mean(bid_spread)
bid_spread_sum = np.sum(bid_spread)
bid_spread_std = np.std(bid_spread)
bid_spread_max = np.max(bid_spread)
bid_spread_min = np.min(bid_spread)
ask_spread_mean = np.mean(ask_spread)
ask_spread_sum = np.sum(ask_spread)
ask_spread_std = np.std(ask_spread)
ask_spread_max = np.max(ask_spread)
ask_spread_min = np.min(ask_spread)
total_volume_mean = np.mean(total_volume)
total_volume_sum = np.sum(total_volume)
total_volume_std = np.std(total_volume)
total_volume_max = np.max(total_volume)
total_volume_min = np.min(total_volume)
volume_imbalance_mean = np.mean(volume_imbalance)
volume_imbalance_sum = np.sum(volume_imbalance)
volume_imbalance_std | |
<reponame>va7eex/docker-ldbprocessor
#!/bin/python3
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "1.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
import os
import csv
import string
import random
import json
import time
import logging
from datetime import datetime
from flask import Flask
from flask import request, current_app, g
from flask import render_template
from flask import session, redirect, url_for
from flask import jsonify
from flaskext.mysql import MySQL
from pymysql.cursors import DictCursor
from flask_redis import FlaskRedis
from flask_selfdoc import Autodoc
from schema import Schema, And, Use, Optional, SchemaError
from markupsafe import escape
from BarcodePrinter import LabelMaker
from Barcode import Barcode
from LineItem import LineItemOS
from LineItem import LineItemAR
app = Flask(__name__)
auto = Autodoc(app)
# Session-signing key taken from the environment.
# WARNING (original note): this mirrors the Flask example key setup --
# ensure FLASK_SECRET is a real secret in production.
app.secret_key = bytes(os.getenv("FLASK_SECRET").encode(encoding='UTF-8',errors='namereplace'))
# MySQL connection settings, all injected via environment variables.
app.config['MYSQL_DATABASE_HOST'] = os.getenv('MYSQL_IP')
app.config['MYSQL_DATABASE_PORT'] = int(os.getenv('MYSQL_PORT'))
app.config['MYSQL_DATABASE_USER'] = os.getenv('MYSQL_USER')
app.config['MYSQL_DATABASE_PASSWORD'] = os.getenv('MYSQL_PASSWORD')
app.config['MYSQL_DATABASE_DB'] = os.getenv('MYSQL_DB')
# DictCursor so queries return dict rows (handlers index results by column name)
mysql = MySQL(cursorclass=DictCursor)
mysql.init_app(app)
# Redis holds the in-flight barcode scan tallies (see the /bc/* routes);
# decode_responses=True means all values come back as str, not bytes.
app.config['REDIS_URL'] = f'redis://{os.getenv("REDIS_IP")}:{os.getenv("REDIS_PORT")}/0'
redis_client = FlaskRedis(app, decode_responses=True)
logging.basicConfig(level=int(os.getenv('LOGLEVEL')))
def __buildtables():
    """Create the MySQL schema (item info, UPC->SKU lookup, invoice and order logs) if absent.

    Called once at import time (see the module-level call below).
    """
    startconnecttime = time.time_ns()
    # NOTE(review): `mysql` is the extension object, which is truthy as soon
    # as it exists -- this loop likely never actually waits on the server.
    while not mysql:
        time.sleep(1)
        pass
    endconnecttime = time.time_ns()
    logging.info(f'SQL Database reached after {endconnecttime-startconnecttime} ns')
    connection = mysql.connect()
    cur = connection.cursor()
    # Current/previous price per SKU, with update timestamps.
    cur.execute('''CREATE TABLE IF NOT EXISTS iteminfolist
        (sku MEDIUMINT(8) UNSIGNED ZEROFILL,
        price FLOAT(11,4),
        oldprice FLOAT(11,4),
        badbarcode BOOLEAN NOT NULL DEFAULT 0,
        lastupdated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
        oldlastupdated TIMESTAMP,
        PRIMARY KEY (sku))''')
    # UPC barcode -> SKU mapping.
    cur.execute('''CREATE TABLE IF NOT EXISTS skubarcodelookup
        (upc BIGINT UNSIGNED,
        sku MEDIUMINT(8) UNSIGNED ZEROFILL,
        timeadded TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
        timemodified TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
        PRIMARY KEY (upc))''')
    #['SKU', 'Product Description', 'Product Category', 'Size', 'Qty', 'UOM', 'Price per UOM', 'Extended Price',
    #'SU Quantity', 'SU Price', 'WPP Savings', 'Cont. Deposit', 'Original Order#']
    # One row per AR-invoice line item (columns mirror the CSV header above).
    cur.execute('''CREATE TABLE IF NOT EXISTS invoicelog (
        id INT NOT NULL AUTO_INCREMENT,
        sku MEDIUMINT(8) UNSIGNED ZEROFILL,
        productdescription VARCHAR(255),
        productcategory VARCHAR(255),
        size VARCHAR(20),
        qty SMALLINT UNSIGNED,
        uom VARCHAR(20),
        priceperuom FLOAT(11,4),
        extendedprice FLOAT(11,4),
        suquantity SMALLINT UNSIGNED,
        suprice FLOAT(11,4),
        wppsavings FLOAT(11,4),
        contdeposit FLOAT(11,4),
        refnum INT(10),
        invoicedate DATE,
        PRIMARY KEY (id))''')
    # One row per Order Submission Report line item (see /osr/* routes).
    cur.execute('''CREATE TABLE IF NOT EXISTS orderlog (
        id INT NOT NULL AUTO_INCREMENT,
        sku MEDIUMINT(8) UNSIGNED ZEROFILL,
        upc BIGINT UNSIGNED,
        productdescription VARCHAR(255),
        sellingunitsize VARCHAR(32),
        uom VARCHAR(20),
        qty SMALLINT UNSIGNED,
        orderdate DATE,
        ordernumber INT UNSIGNED,
        thirdparty BOOL,
        PRIMARY KEY (id))''')
    connection.commit()
    cur.close()
__buildtables()  # ensure the schema exists before the first request is served
@app.route('/documentation')
def documentation():
    """Serve the Autodoc-generated HTML listing of all documented endpoints."""
    return auto.html()
@app.route('/')
def page_index():
    """Landing page."""
    return render_template('index.html')
@app.route('/ar')
def page_ar():
    """AR-invoice processing UI (see the ARinvoice routes below)."""
    return render_template('ar.html')
@app.route('/bc')
def page_bc():
    """Barcode receiving UI; unregistered terminals are sent to registration first."""
    if 'scanner_terminal' not in session:
        return redirect(url_for('page_bc_register'))
    return render_template('receiving.html')
@app.route('/osr')
def osrpage():
    """Order Submission Report UI."""
    return render_template('osr.html')
@app.route('/inv')
def page_inv():
    """Inventory UI; requires a registered scanner terminal."""
    if 'scanner_terminal' not in session:
        return redirect(url_for('page_bc_register'))
    return render_template('inventory.html')
@app.route('/labelmaker')
def page_labelmaker():
    """Label-printing UI."""
    return render_template('lp.html')
@app.before_request
def before_request():
    """Open a per-request MySQL connection and cursor on flask.g."""
    g.connection = mysql.connect()
    g.cur = g.connection.cursor()
@app.after_request
def after_request_func(response):
    """No-op hook; the response passes through unchanged."""
    return response
@app.teardown_request
def teardown_request(error=None):
    """Finish the per-request DB transaction and release the cursor.

    Commits on success; rolls back when the request raised, so a
    half-applied write is never persisted (the original committed
    unconditionally, even after an exception).
    """
    if error is None:
        g.connection.commit()
    else:
        g.connection.rollback()
    g.cur.close()
#
# Barcode Processor
#
@app.route('/bc/register', methods=['GET','POST'])
@auto.doc(expected_type='application/json')
def page_bc_register():
    """Register endpoint with a mostly-unique name.
    GET: Check if endpoint registered
    POST: Set endpoint name.
    """
    if request.method == 'POST':
        # fall back to a random 8-char terminal name when the form omits one
        fallback = ''.join(random.choices(string.ascii_uppercase + string.digits, k=8))
        session['scanner_terminal'] = escape(request.form.get('scanner_terminal', fallback)).lower()
        if 'headless' in request.form:
            return {'success': True}
        return render_template('bcregister.html', scanner_terminal=escape(session["scanner_terminal"]))
    # GET path
    if 'check' in request.args:
        return {'success': bool('scanner_terminal' in session)}
    if 'scanner_terminal' in session:
        return render_template('bcregister.html', scanner_terminal=escape(session["scanner_terminal"]))
    return render_template('bcregister.html')
def __sumRedisValues(values):
    """Sum a collection of Redis counter values, skipping non-numeric entries.

    Values may arrive as ints or as decoded strings (FlaskRedis is configured
    with decode_responses=True); strings that are not pure digits are ignored.
    The parameter was renamed from `list`, which shadowed the builtin.
    """
    return sum(int(v) for v in values if type(v) is int or v.isdigit())
def __crossreference_UPCs(redisvalues):
    """Rewrite scanned UPC -> qty entries to 'sku, description' keys where possible.

    Keys matched in the item database become '<sku>, <description>'; a fallback
    lookup strips the first two and last characters and flags the description
    with '!!'. Unmatched keys pass through unchanged.
    """
    processedredisvalues = {}
    for key, qty in redisvalues.items():
        result = __itemsearch(key)
        # If the barcode is over 14 digits it won't match in the system; when
        # the first two characters are '01' they are not useful, so drop them.
        # BUG FIX: the original tested key[:1] == '01' -- a one-character
        # slice can never equal a two-character string, so the branch was dead.
        if len(key) > 14 and key[:2] == '01':
            key = key[2:]
            result = __itemsearch(key)
        if result:
            key = f"{result[len(result)-1]['sku']}, {result[len(result)-1]['productdescription']}"
            processedredisvalues[key] = qty
        else:
            # last resort: retry without the first two and the final character
            result = __itemsearch(key[2:-1])
            if result:
                key = f"{result[len(result)-1]['sku']}, !! {result[len(result)-1]['productdescription']}"
                processedredisvalues[key] = qty
            else:
                processedredisvalues[key] = qty
    return processedredisvalues
def __countBarcodes(scandate, crossref=False):
    """Collect every scan-group hash for `scandate` from Redis.

    :param scandate: YYYYMMDD datestamp used in the Redis key prefix.
    :param crossref: when True, rewrite UPC keys to 'sku, description'.
    :return: (per-group barcode dicts, per-group tallies, grand total).
    """
    barcodes = {}
    tally = {}
    total = 0
    for key in redis_client.scan_iter(match=f'{scandate}_ingest_*'):
        key = str(key)
        logging.info(key)
        if crossref:
            barcodes[key] = __crossreference_UPCs(redis_client.hgetall(key))
        else:
            barcodes[key] = redis_client.hgetall(key)
        tally[key] = __sumRedisValues(redis_client.hvals(key))
        total += tally[key]
        # BUG FIX: logging.info(total, tally[key]) treated the int `total`
        # as the %-format string and raised a formatting error inside logging.
        logging.info('%s %s', total, tally[key])
    return barcodes, tally, total
@app.route('/bc/countbarcodes', methods=['GET'])
@auto.doc()
def bc_countbarcodes():
    """Tally all barcode scans for a date (defaults to today, YYYYMMDD)."""
    if 'scanner_terminal' not in session:
        return {'success': False, 'reason': 'not registered'}, 401
    datestamp = escape(request.args.get('datestamp', datetime.today().strftime('%Y%m%d')))
    barcodes, tally, total = __countBarcodes(datestamp)
    return {'success': True, 'barcodes': barcodes, 'tally': tally, 'total': total}, 200
@app.route('/bc/lastscanned', methods=['GET'])
@auto.doc()
def bc_lastscan():
    """Return the most recent UPC scanned on this terminal."""
    if 'scanner_terminal' not in session:
        return {'success': False, 'reason': 'no session info'}, 401
    last_key = f'lastscanned_{session["scanner_terminal"]}'
    if not redis_client.exists(last_key):
        return {'success': False, 'reason': 'nothing logged.'}, 204
    return {'success': True, 'last_scanned': redis_client.get(last_key)}
@app.route('/bc/scan', methods=['POST'])
@auto.doc(expected_type='application/json')
def page_bcscan():
    """Scan into master record for today.

    Adds (default) or removes one unit of the posted UPC in the Redis hash
    keyed by date / terminal / scan group.
    """
    if 'scanner_terminal' not in session:
        return {'success': False, 'reason': 'not registered'}, 401
    upc, upctype = Barcode.BarcodeType(escape(request.form.get('upc', 0)))
    scangroup = escape(request.form.get('scangroup', 0))
    addremove = escape(request.form.get('addremove', 'add'))
    datestamp = escape(request.form.get('datestamp', datetime.today().strftime('%Y%m%d')))
    #TODO: https://supportcommunity.zebra.com/s/article/Determine-Barcode-Symbology-by-using-Symbol-or-AIM-Code-Identifiers?language=en_US
    # implement this
    logging.info(request.form)
    redishashkey = f'{datestamp}_ingest_{escape(session["scanner_terminal"])}_{scangroup.zfill(3)}'
    if 'remove' in addremove:
        if not redis_client.hexists(redishashkey, upc):
            return {'success': True, 'reason': 'nothing to do'}
        # BUG FIX: the original compared the raw Redis string to an int
        # (TypeError with decode_responses=True) and left a count of exactly
        # 2 untouched. Decrement while more than one unit remains, else drop.
        if int(redis_client.hget(redishashkey, upc)) > 1:
            redis_client.hincrby(redishashkey, upc, -1)
        else:
            redis_client.hdel(redishashkey, upc)
    else:
        redis_client.hincrby(redishashkey, upc, 1)
    # BUG FIX: logging.info(redishashkey, upc) used the key as a format string
    logging.info('%s %s', redishashkey, upc)
    redis_client.expire(redishashkey, (60*60*24)*3)  # expire in 3 days to keep DB size small
    redis_client.set(f'lastscanned_{session["scanner_terminal"]}', upc)
    payload = {}
    if 'machine' not in request.form:
        payload['barcodes'], payload['tally'], payload['total'] = __countBarcodes(datestamp)
    return {'success': True, 'upc': upc, **payload}
@app.route('/bc/getstatus', methods=['GET'])
@auto.doc(expected_type='application/json')
def __bc_getstatus():
    """Returns data related to barcode scans."""
    datestamp = escape(request.form.get('datestamp', datetime.today().strftime('%Y%m%d')))
    barcodes, tally, total = __countBarcodes(datestamp)
    # key names preserved from the original payload dict
    return jsonify(barcodes=barcodes, _tally=tally, __total=total)
def __bc_deleteRedisDB(scandate):
    """Delete every scan-group hash for this terminal on `scandate`; return the count."""
    logging.info(f'Deleting databases for {scandate}:')
    pattern = str(f'{scandate}_ingest_{escape(session["scanner_terminal"])}') + '*'
    deleted = 0
    # queue all deletions on one pipeline so they execute in a single round trip
    pipe = redis_client.pipeline()
    for key in redis_client.scan_iter(pattern):
        logging.info(f'\t{key}')
        pipe.delete(key)
        deleted += 1
    pipe.execute()
    return deleted
@app.route('/bc/deleteall', methods=['POST'])
@auto.doc()
def bc_deleteall():
    """Drop every scan hash for this terminal for the posted scandate."""
    if 'scanner_terminal' not in session:
        return {'success': False, 'reason': 'nothing to do'}, 401
    scandate = escape(request.form.get('scandate', datetime.today().strftime('%Y%m%d')))
    count = __bc_deleteRedisDB(scandate)
    return {'success': True, 'result': f'Deleted {count} tables.'}
@app.route('/bc/linksku', methods=['POST'])
@auto.doc()
def __bc_linksku():
    # TODO: unimplemented stub (returning None makes Flask raise a 500 if hit)
    pass
@app.route('/bc/exportscanlog', methods=['POST'])
@auto.doc(expected_type='application/json')
def bc_exportlog():
    """Dump the day's scan groups to a JSON file under /var/ldbinvoice.

    BUG FIX: the original passed the Flask Response returned by
    __bc_getstatus() straight to json.dump, which raises TypeError
    (Response objects are not JSON serializable). Serialize the
    underlying payload dict instead, using the same key names.
    """
    datestamp = escape(request.form.get('datestamp', datetime.today().strftime('%Y%m%d')))
    payload = {}
    payload['barcodes'], payload['_tally'], payload['__total'] = __countBarcodes(datestamp)
    with open(f'/var/ldbinvoice/{datestamp}_receiving_scan_log.txt', 'w') as f:
        json.dump(payload, f, indent=2, sort_keys=True)
    return {'success': True}
@app.route('/bc/del', methods=['DELETE'])
@auto.doc()
def __bc_del():
    # TODO: unimplemented stub
    pass
@app.route('/bc/new', methods=['POST'])
@auto.doc()
def __bc_new():
    # TODO: unimplemented stub
    pass
@app.route('/bc/get', methods=['GET', 'POST'])
@auto.doc()
def __bc_get():
    # TODO: unimplemented stub
    pass
#
#OrderSubmission
#
@app.route('/osr/vieworder')
def page_osr_vieworder():
    """Render the order viewer with every distinct order date on record."""
    g.cur.execute('SELECT DISTINCT orderdate FROM orderlog')
    order_dates = g.cur.fetchall()
    return render_template('vieworder.html', dates=order_dates)
@app.route('/osr/getorderdates')
@auto.doc(expected_type='application/json')
def __osr_georderdates():
    """Returns the date of all Order Submission Reports on record."""
    g.cur.execute('SELECT DISTINCT orderdate FROM orderlog')
    rows = g.cur.fetchall()
    # index -> row mapping; the original built this with rows.pop(0) inside a
    # range(len(rows)) loop, which is the same result computed quadratically.
    return {i: row for i, row in enumerate(rows)}
@app.route('/osr/getordernumber')
@auto.doc(expected_type='application/json', args={
    'orderdate': 'Date in YYYY-MM-DD'
})
def __osr_getordernumber():
    """Returns the Order Number(s) for a given date.
    :param str orderdate: Date of order, YYYY-MM-DD.
    """
    orderdate = escape(request.args.get('orderdate',''))
    # SECURITY FIX: the request arg was interpolated into the SQL string
    # (injection risk, and the date literal was unquoted). Parameterize.
    g.cur.execute('SELECT DISTINCT ordernumber FROM orderlog WHERE orderdate=%s',
                  (str(orderdate),))
    rows = g.cur.fetchall()
    return {
        'orderdate': orderdate,
        'ordernumber': [row['ordernumber'] for row in rows],
    }
@app.route('/osr/getorder', methods=['GET'])
@auto.doc(expected_type='application/json', args={
    'ordernumber': 'Order number to retrieve',
    'thirdparty': 'Include thirdparty warehouse suppliers'
})
#@use_kwargs({'ordernumber': fields.Str(), 'thirdparty': fields.Bool()})
def __osr_getorder():
    """Returns the full order details for a given Order Submission Report
    :param int ordernumber: Order number of order to retrieve.
    :param bool thirdparty: Return items stocked at third-party warehouses.
    """
    ordernumber = escape(request.args.get('ordernumber',''))
    thirdparty = escape(request.args.get('thirdparty',''))
    # SECURITY FIX: request args were interpolated straight into the SQL
    # strings (injection risk); use parameterized queries instead.
    query = ('SELECT id,sku,upc,productdescription,sellingunitsize,uom,qty,thirdparty '
             'FROM orderlog WHERE ordernumber=%s')
    if thirdparty:
        # NOTE(review): despite the arg doc saying "include", this narrows the
        # result to third-party rows only -- behaviour preserved as-is.
        query += ' AND thirdparty=1'
    g.cur.execute(query, (str(ordernumber),))
    rows = g.cur.fetchall()
    g.cur.execute('SELECT DISTINCT orderdate, ordernumber FROM orderlog WHERE ordernumber=%s',
                  (str(ordernumber),))
    details = g.cur.fetchone()
    returnrows = {'items': rows}
    logging.info(f'{returnrows} || {details}')
    return {**returnrows, **details}
@app.route('/osr/addlineitem', methods=['POST'])
@auto.doc(expected_type='application/json', args={
    'ordnum': 'Order Number on Order Submission Report',
    'orddate': 'Order Date on Order Submission Report',
    'thirdparty': 'Whether item is in the \'Thirdparty Wholesalers\' table of Order Submission Report'
})
def __osr_addlineitem():
    """Add a line item from an Order Submission Report to the database.
    """
    # NOTE(review): the @auto.doc args advertise 'ordnum'/'orddate', but the
    # code reads 'ordernumber'/'orderdate' -- confirm which the client sends.
    ordernumber = escape(request.form.get('ordernumber','1'))
    orderdate = escape(request.form.get('orderdate','19990101'))
    thirdparty = escape(request.form.get('thirdparty',False))
    # remaining form fields become LineItemOS constructor kwargs
    restofrequest = request.form.to_dict(flat=True)
    try:
        li = LineItemOS(**restofrequest)
    except Exception as err:
        logging.error(err)
        return {'success': False, 'reason': 'line item error'}
    # NOTE(review): form-derived values are interpolated into the INSERT via
    # f-string (SQL injection risk); parameterizing requires reworking
    # LineItemOS.getkeysconcat()/getvaluesconcat() -- flagged, not changed.
    query = f'INSERT INTO orderlog (ordernumber, orderdate, {li.getkeysconcat()}, thirdparty ) VALUES ( {ordernumber}, \'{orderdate}\', {li.getvaluesconcat()}, {thirdparty} )'
    g.cur.execute(query)
    return {'success': True, 'lineitem': li.getall()}
#
# ARinvoice
#
@app.route('/ar/pricechange', methods=['GET','POST'])
@auto.doc(expected_type='application/json', args={
'invoicedate': 'Date of invoice in YYYY-MM-DD',
'sku': 'SKU of item',
'price': 'Price of item, float'
})
#@use_kwargs({'sku': fields.Str()})
def __ar_pricechange():
"""Perform a price comparison/change of an item.
:param str invoicedate: Date of invoice, YYYY-MM-DD.
:param int sku: SKU of item.
:param float price: Price of item.
"""
if request.method == 'POST':
sku = escape(request.form.get('sku','0'))
price = escape(request.form.get('price','0.0'))
query = f'SELECT * FROM iteminfolist WHERE sku={sku}'
g.cur.execute(query)
results = g.cur.fetchone()
newitem = False
oldprice = -1.0
oldlastupdated = '1979-01-01 01:01:01'
#logging.info(len(results), results)
if not results:
newitem = True
else:
oldprice = results['price']
oldlastupdated = results['lastupdated']
if float(results['price']) == float(price):
return {'newitem': False, **results }
query = f'INSERT INTO iteminfolist (sku, price) VALUES ({sku},{price}) ON DUPLICATE KEY UPDATE price={price}, oldprice={oldprice}, oldlastupdated=\'{oldlastupdated}\''
g.cur.execute(query)
# return {'newitem': newitem, 'sku': sku, 'price': price}
query = f'SELECT * FROM iteminfolist WHERE sku={sku}'
g.cur.execute(query)
| |
<filename>elegantrl/agents/AgentPPO.py
import torch
import numpy as np
from elegantrl.agents.AgentBase import AgentBase
from elegantrl.agents.net import ActorPPO, CriticPPO
from elegantrl.agents.net import ActorDiscretePPO, SharePPO
'''[ElegantRL.2021.12.12](github.com/AI4Fiance-Foundation/ElegantRL)'''
class AgentPPO(AgentBase):
"""
Bases: ``AgentBase``
PPO algorithm. “Proximal Policy Optimization Algorithms”. <NAME>. et al.. 2017.
:param net_dim[int]: the dimension of networks (the width of neural networks)
:param state_dim[int]: the dimension of state (the number of state vector)
:param action_dim[int]: the dimension of action (the number of discrete action)
:param learning_rate[float]: learning rate of optimizer
:param if_per_or_gae[bool]: PER (off-policy) or GAE (on-policy) for sparse reward
:param env_num[int]: the env number of VectorEnv. env_num == 1 means don't use VectorEnv
:param agent_id[int]: if the visible_gpu is '1,9,3,4', agent_id=1 means (1,9,4,3)[agent_id] == 9
"""
    def __init__(self):
        """Set PPO hyper-parameter defaults and network classes; call ``init()`` to build the nets."""
        AgentBase.__init__(self)
        self.ClassAct = ActorPPO
        self.ClassCri = CriticPPO
        self.if_off_policy = False  # PPO is on-policy
        self.ratio_clip = 0.2  # could be 0.00 ~ 0.50, ratio.clamp(1 - clip, 1 + clip)
        self.lambda_entropy = 0.02  # could be 0.00~0.10
        self.lambda_a_value = 1.00  # could be 0.25~8.00, the lambda of advantage value
        self.lambda_gae_adv = 0.98  # could be 0.95~0.99, GAE (Generalized Advantage Estimation. ICLR.2016.)
        self.get_reward_sum = None  # self.get_reward_sum_gae if if_use_gae else self.get_reward_sum_raw
        self.if_use_old_traj = True  # presumably lets splice_trajectory reuse unfinished trajectories -- confirm
        self.traj_list = None  # allocated in init(): per-env 5-slot trajectory buffers
    def init(self, net_dim=256, state_dim=8, action_dim=2, reward_scale=1.0, gamma=0.99,
             learning_rate=1e-4, if_per_or_gae=False, env_num=1, gpu_id=0):
        """
        Explicit call ``self.init()`` to overwrite the ``self.object`` in ``__init__()`` for multiprocessing.
        """
        AgentBase.init(self, net_dim=net_dim, state_dim=state_dim, action_dim=action_dim,
                       reward_scale=reward_scale, gamma=gamma,
                       learning_rate=learning_rate, if_per_or_gae=if_per_or_gae,
                       env_num=env_num, gpu_id=gpu_id, )
        # one 5-slot list (state, reward, done-mask, action, noise) per environment
        self.traj_list = [[list() for _ in range(5)] for _ in range(env_num)]
        self.env_num = env_num
        # for on-policy PPO this flag selects GAE vs raw discounted returns
        if if_per_or_gae:  # if_use_gae
            self.get_reward_sum = self.get_reward_sum_gae
        else:
            self.get_reward_sum = self.get_reward_sum_raw
        # a single env uses the scalar rollout loop; multiple envs use batched steps
        if env_num == 1:
            self.explore_env = self.explore_one_env
        else:
            self.explore_env = self.explore_vec_env
def select_action(self, state: np.ndarray) -> np.ndarray:
"""
Select an action via a given state.
:param state: a state in a shape (state_dim, ).
:return: action [array], action.shape == (action_dim, ) where each action is clipped into range(-1, 1).
"""
s_tensor = torch.as_tensor(state[np.newaxis], device=self.device)
a_tensor = self.act(s_tensor)
action = a_tensor.detach().cpu().numpy()
return np.tanh(action) # the only different
    def explore_one_env(self, env, target_step) -> list:  # 247 second
        """
        Collect trajectories through the actor-environment interaction.
        :param env: the DRL environment instance.
        :param target_step: the total step for the interaction.
        :return: a list of trajectories [traj, ...] where `traj = [(state, other), ...]`.
        """
        traj_list = list()
        state = self.states[0]
        '''get traj_list and last_done'''
        step = 0
        done = False
        last_done = 0
        # keep stepping until target_step is reached AND the episode has
        # terminated, so the recorded trajectory always closes on a reset
        while step < target_step or not done:
            ten_s = torch.as_tensor(state, dtype=torch.float32).unsqueeze(0)
            # get_action returns (action, noise); move both back to CPU for buffering
            ten_a, ten_n = [ten.cpu() for ten in self.act.get_action(ten_s.to(self.device))]
            next_s, reward, done, _ = env.step(np.tanh(ten_a[0].numpy()))
            traj_list.append((ten_s, reward, done, ten_a, ten_n))
            step += 1
            if done:
                state = env.reset()
                last_done = step  # behind `step += 1`
            else:
                state = next_s
        last_done = (last_done,)  # 1-tuple, matching the vectorized-env interface
        self.states[0] = state
        # assert len(traj_list) == step
        # assert len(traj_list[0]) == 5
        # assert len(traj_list[0][0]) == self.env_num
        '''convert traj_list -> buf_srdan'''
        buf_srdan = list(map(list, zip(*traj_list)))  # srdan: state, reward, done, action, noise
        del traj_list
        # assert len(buf_srdan) == 5
        # assert len(buf_srdan[0]) == step
        # assert len(buf_srdan[0][0]) == self.env_num
        # scale rewards and convert done flags into discount masks (1 - done) * gamma
        buf_srdan = [
            torch.stack(buf_srdan[0]),
            (torch.tensor(buf_srdan[1], dtype=torch.float32) * self.reward_scale).unsqueeze(0).unsqueeze(1),
            ((1 - torch.tensor(buf_srdan[2], dtype=torch.float32)) * self.gamma).unsqueeze(0).unsqueeze(1),
            torch.stack(buf_srdan[3]),
            torch.stack(buf_srdan[4]),
        ]
        # assert all([buf_item.shape[:2] == (step, self.env_num) for buf_item in buf_srdan])
        return self.splice_trajectory(buf_srdan, last_done)
    def explore_vec_env(self, env, target_step) -> list:
        """
        Collect trajectories through the actor-environment interaction for a **vectorized** environment instance.
        :param env: the DRL environment instance.
        :param target_step: the total step for the interaction.
        :return: a list of trajectories [traj, ...] where each trajectory is a list of transitions [(state, other), ...].
        """
        traj_list = list()
        ten_s = self.states
        step = 0
        # per-env index of the most recent terminal step (0 if the env never finished)
        last_done = torch.zeros(self.env_num, dtype=torch.int, device=self.device)
        while step < target_step:
            ten_a, ten_n = self.act.get_action(ten_s)  # batched (action, noise)
            ten_s_next, ten_rewards, ten_dones, _ = env.step(ten_a.tanh())
            # clone() so later in-place env/actor updates can't mutate the buffer
            traj_list.append((ten_s.clone(), ten_rewards.clone(), ten_dones.clone(), ten_a, ten_n))
            ten_s = ten_s_next
            step += 1
            last_done[torch.where(ten_dones)[0]] = step  # behind `step+=1`
            # if step % 64 == 0:
            #     print(';;', last_done.detach().cpu().numpy())
        self.states = ten_s
        # assert len(traj_list) == step
        # assert len(traj_list[0]) == 5
        # assert len(traj_list[0][0]) == self.env_num
        buf_srdan = list(map(list, zip(*traj_list)))
        del traj_list
        # assert len(buf_srdan) == 5
        # assert len(buf_srdan[0]) == step
        # assert len(buf_srdan[0][0]) == self.env_num
        # scale rewards and turn done flags into discount masks (1 - done) * gamma
        buf_srdan[0] = torch.stack(buf_srdan[0])
        buf_srdan[1] = (torch.stack(buf_srdan[1]) * self.reward_scale).unsqueeze(2)
        buf_srdan[2] = ((1 - torch.stack(buf_srdan[2])) * self.gamma).unsqueeze(2)
        buf_srdan[3] = torch.stack(buf_srdan[3])
        buf_srdan[4] = torch.stack(buf_srdan[4])
        # assert all([buf_item.shape[:2] == (step, self.env_num)
        #             for buf_item in buf_srdan])
        return self.splice_trajectory(buf_srdan, last_done)
    def update_net(self, buffer, batch_size, repeat_times, soft_update_tau):
        """
        Update the neural networks by sampling batch data from `ReplayBuffer`.
        .. note::
            Using advantage normalization and entropy loss.
        :param buffer: the ReplayBuffer instance that stores the trajectories.
        :param batch_size: the size of batch data for Stochastic Gradient Descent (SGD).
        :param repeat_times: the re-using times of each trajectory.
        :param soft_update_tau: the soft update parameter.
        :return: a tuple of the log information.
        """
        with torch.no_grad():
            buf_state, buf_reward, buf_mask, buf_action, buf_noise = [ten.to(self.device) for ten in buffer]
            buf_len = buf_state.shape[0]
            '''get buf_r_sum, buf_logprob'''
            bs = 2 ** 10  # set a smaller 'BatchSize' when out of GPU memory.
            # evaluate the critic target in chunks to bound peak GPU memory
            buf_value = [self.cri_target(buf_state[i:i + bs]) for i in range(0, buf_len, bs)]
            buf_value = torch.cat(buf_value, dim=0)
            buf_logprob = self.act.get_old_logprob(buf_action, buf_noise)
            buf_r_sum, buf_adv_v = self.get_reward_sum(buf_len, buf_reward, buf_mask, buf_value)  # detach()
            # normalize the advantage (zero mean) and scale by lambda_a_value
            buf_adv_v = (buf_adv_v - buf_adv_v.mean()) * (self.lambda_a_value / (buf_adv_v.std() + 1e-5))
            # buf_adv_v: buffer data of adv_v value
            del buf_noise
        obj_critic = None
        obj_actor = None
        assert buf_len >= batch_size
        update_times = int(buf_len / batch_size * repeat_times)
        for update_i in range(1, update_times + 1):
            # sample a random minibatch (with replacement) from the rollout
            indices = torch.randint(buf_len, size=(batch_size,), requires_grad=False, device=self.device)
            state = buf_state[indices]
            r_sum = buf_r_sum[indices]
            adv_v = buf_adv_v[indices]
            action = buf_action[indices]
            logprob = buf_logprob[indices]
            '''PPO: Surrogate objective of Trust Region'''
            new_logprob, obj_entropy = self.act.get_logprob_entropy(state, action)  # it is obj_actor
            ratio = (new_logprob - logprob.detach()).exp()
            surrogate1 = adv_v * ratio
            surrogate2 = adv_v * ratio.clamp(1 - self.ratio_clip, 1 + self.ratio_clip)
            # clipped surrogate: negated because the optimizer minimizes
            obj_surrogate = -torch.min(surrogate1, surrogate2).mean()
            obj_actor = obj_surrogate + obj_entropy * self.lambda_entropy
            self.optim_update(self.act_optim, obj_actor)
            value = self.cri(state).squeeze(1)  # critic network predicts the reward_sum (Q value) of state
            obj_critic = self.criterion(value, r_sum)
            # critic loss scaled by the return's std, presumably for stable
            # gradients across reward scales -- confirm against AgentBase
            self.optim_update(self.cri_optim, obj_critic / (r_sum.std() + 1e-6))
            if self.if_use_cri_target:
                self.soft_update(self.cri_target, self.cri, soft_update_tau)
        a_std_log = getattr(self.act, 'a_std_log', torch.zeros(1)).mean()
        return obj_critic.item(), obj_actor.item(), a_std_log.item()  # logging_tuple
def get_reward_sum_raw(self, buf_len, buf_reward, buf_mask, buf_value) -> (torch.Tensor, torch.Tensor):
"""
Calculate the **reward-to-go** and **advantage estimation**.
:param buf_len: the length of the ``ReplayBuffer``.
:param buf_reward: a list of rewards for the state-action pairs.
:param buf_mask: a list of masks computed by the product of done signal and discount factor.
:param buf_value: a list of state values estimiated by the ``Critic`` network.
:return: the reward-to-go and advantage estimation.
"""
buf_r_sum = torch.empty(buf_len, dtype=torch.float32, device=self.device) # reward sum
pre_r_sum = 0
for i in range(buf_len - 1, -1, -1):
buf_r_sum[i] = buf_reward[i] + buf_mask[i] * pre_r_sum
pre_r_sum = buf_r_sum[i]
buf_adv_v = buf_r_sum - buf_value[:, 0]
return buf_r_sum, buf_adv_v
def get_reward_sum_gae(self, buf_len, ten_reward, ten_mask, ten_value) -> (torch.Tensor, torch.Tensor):
"""
Calculate the **reward-to-go** and **advantage estimation** using GAE.
:param buf_len: the length of the ``ReplayBuffer``.
:param ten_reward: a list of rewards for the state-action pairs.
:param ten_mask: a list of masks computed by the product of done signal and discount factor.
:param ten_value: a list of state values estimated by the ``Critic`` network.
:return: the reward-to-go and advantage estimation.
"""
buf_r_sum = torch.empty(buf_len, dtype=torch.float32, device=self.device) # old policy value
buf_adv_v = torch.empty(buf_len, dtype=torch.float32, device=self.device) # advantage value
pre_r_sum = 0
pre_adv_v = 0 # advantage value of previous step
for i in range(buf_len - 1, -1, -1): # Notice: mask = (1-done) * gamma
buf_r_sum[i] = ten_reward[i] + ten_mask[i] * pre_r_sum
pre_r_sum = buf_r_sum[i]
buf_adv_v[i] = ten_reward[i] + ten_mask[i] * pre_adv_v - ten_value[i]
pre_adv_v = ten_value[i] + buf_adv_v[i] * self.lambda_gae_adv
# ten_mask[i] * pre_adv_v == (1-done) * gamma * pre_adv_v
return buf_r_sum, buf_adv_v
    def splice_trajectory(self, buf_srdan, last_done):
        """Stitch per-environment trajectory fragments into whole tensors.

        For each of the 5 buffered fields (presumably state, reward,
        done-mask, action, noise -- TODO confirm against the caller), prepend
        any leftover fragment stored in ``self.traj_list[env_i]`` from the
        previous rollout, keep only the steps up to each env's last ``done``
        index, and stack the per-env pieces into one tensor.

        :param buf_srdan: list of 5 tensors shaped (steps, env_num, ...);
            consumed destructively via ``pop(0)``.
        :param last_done: per-env index of the last completed episode step.
        :return: a one-element list wrapping the 5 spliced tensors.
        """
        out_srdan = list()
        for j in range(5):
            cur_items = list()
            buf_items = buf_srdan.pop(0)  # same as buf_srdan[j], but frees the input as we go
            for env_i in range(self.env_num):
                last_step = last_done[env_i]
                pre_item = self.traj_list[env_i][j]
                if len(pre_item):
                    # leftover steps saved from the previous rollout go first
                    cur_items.append(pre_item)
                cur_items.append(buf_items[:last_step, env_i])
                if self.if_use_old_traj:
                    # stash the unfinished tail for the next call
                    self.traj_list[env_i][j] = buf_items[last_step:, env_i]
            out_srdan.append(torch.vstack(cur_items))
        # print(';;;3', last_done.sum().item() / self.env_num, out_srdan[0].shape[0] / self.env_num)
        # print(';;;4', out_srdan[1][-4:, -3:])
        return [out_srdan, ]  # wrapped in a list to match the buffer-consumer's expected shape
#!/Users/nabin.acharya/anaconda/bin/python
from string import Template
import urllib
import zipfile
import StringIO
import os
import urllib2
import getopt
import sys
from pandas_datareader import data as pd_data
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import datetime as dtt
from sklearn import svm, metrics, preprocessing
from sklearn.linear_model import LinearRegression
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import make_scorer
from sklearn.metrics import r2_score
from sklearn import cross_validation as CV
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras import optimizers
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
# Silence TensorFlow's C++ startup logging: '2' hides INFO and WARNING,
# leaving only ERROR messages.
# NOTE(review): TF typically reads this variable at import time; keras (and
# therefore TF) is already imported above, so this may take effect too late
# to suppress the import-time warnings -- verify.
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
class DataGather:
    """Fetch and cache raw market data for one ticker.

    Downloads BATS daily short-sale volume archives and a Yahoo price sheet
    into ``<dataDir>/<ticker>``, skipping files that are already on disk.

    NOTE(review): written for Python 2 (urllib.urlretrieve, urllib2,
    StringIO, and the long-deprecated pandas ``.ix`` indexer); it will not
    run unmodified under Python 3.
    """
    def __init__(self, ticker, start_date, end_date, dataDir="data"):
        self.ticker = ticker
        # Dates arrive as strings like "1-1-2017" (month-day-year).
        self.start_date = dtt.datetime.strptime(start_date, "%m-%d-%Y")
        self.end_date = dtt.datetime.strptime(end_date, "%m-%d-%Y")
        self.data_setup(dataDir)  # also sets self.dataDir to dataDir/<ticker>
    def data_setup(self, dataDir):
        """Ensure ``dataDir/<ticker>`` exists and point self.dataDir at it."""
        if not os.path.isdir(dataDir):
            os.mkdir(dataDir)
        if not os.path.isdir(dataDir + "/" + self.ticker):
            os.mkdir(dataDir + "/" + self.ticker)
        self.dataDir = dataDir + "/" + self.ticker
    def file_exists(self, url):
        """Return True iff a GET of ``url`` answers with HTTP 200."""
        try:
            ret = urllib2.urlopen(url)
            return ret.code == 200
        except Exception:  # URLError/HTTPError/socket errors: treat as absent
            return False
    def get_retry(self, retry, url, dest):
        """Download ``url`` to ``dest``, retrying up to ``retry`` times on IOError.

        Bug fix: the original wrapped the download in ``while True`` and
        retried forever on IOError, which made the ``retry`` bound
        ineffective and could hang the caller on a persistently failing URL.
        """
        if not os.path.exists(dest):
            for attempt in range(retry):
                try:
                    urllib.urlretrieve(url, dest)
                except IOError:
                    continue  # transient failure -> next bounded attempt
                break
            if os.path.exists(dest):
                print("Received: " + dest + " from : " + url)
            else:
                print("Failed to receive: " + dest + " from : " + url)
        else:
            print("Already have " + dest)
    def GetShortedData(self):
        """Download the daily BATS short-sale archive for every day in
        [start_date, end_date]; days missing upstream are skipped."""
        s = Template('http://www.batstrading.com/market_data/shortsales/$year/$month/$fName-dl?mkt=bzx')
        delta = dtt.timedelta(days=1)
        d = self.start_date
        while d <= self.end_date:
            file_name = 'BATSshvol%s.txt.zip' % d.strftime('%Y%m%d')
            dest = os.path.join(self.dataDir, file_name)
            url = s.substitute(fName=file_name, year=d.year, month='%02d' % d.month)
            if self.file_exists(url):
                self.get_retry(5, url, dest)
            d += delta
    def ProcessShortDate(self, query_date):
        """Return this ticker's short ratio for one date ("m-d-Y" string)."""
        self.query_date = dtt.datetime.strptime(query_date, "%m-%d-%Y")
        file_name = 'BATSshvol%s.txt.zip' % self.query_date.strftime('%Y%m%d')
        dest = os.path.join(self.dataDir, file_name)
        return self.readShortRatio(dest)[self.ticker]
    def readShortRatio(self, fName):
        """Parse one BATS zip archive into a per-symbol short-volume ratio.

        :return: Series indexed by symbol; its ``name`` is the file's date.
        """
        # Context manager fixes the original's leaked ZipFile handle.
        with zipfile.ZipFile(fName) as zipped:
            lines = zipped.read(zipped.namelist()[0])
        buf = StringIO.StringIO(lines)
        df = pd.read_csv(buf, sep='|', index_col=1, parse_dates=False,
                         dtype={'Date': object, 'Short Volume': np.float32, 'Total Volume': np.float32})
        ratio = df['Short Volume'] / df['Total Volume']
        ratio.name = dtt.datetime.strptime(df['Date'][-1], '%Y%m%d')
        return ratio
    def ProcessShortedData(self, symbol):
        """Collect ``symbol``'s short ratio for each cached day in range.

        Unreadable archives (or archives lacking the symbol) contribute 0,
        matching the original behavior of appending the reset fallback.
        """
        sr = []
        delta = dtt.timedelta(days=1)
        d = self.start_date
        while d <= self.end_date:
            file_name = 'BATSshvol%s.txt.zip' % d.strftime('%Y%m%d')
            dest = os.path.join(self.dataDir, file_name)
            if os.path.exists(dest):
                value = 0  # fallback when the archive can't be parsed
                try:
                    value = self.readShortRatio(dest)[symbol]
                except Exception:
                    pass  # best-effort: keep the series aligned with days on disk
                sr.append(value)
            d += delta
        return sr
    def GetStockDataSheet(self, dataDir="data"):
        """Write ``<dataDir>/<ticker>/<ticker>-stock-data.csv`` combining
        Yahoo prices, volume, short ratio and a 100-day moving average.

        Bug fix: the output path previously hard-coded ``"data/"``,
        silently ignoring the ``dataDir`` argument.
        """
        self.data_setup(dataDir)
        data_source = 'yahoo'
        fileName = dataDir + "/" + self.ticker + "/" + self.ticker + "-stock-data.csv"
        tkr = [self.ticker]
        if not os.path.exists(fileName):
            panel_data = pd_data.DataReader(tkr, data_source, self.start_date, self.end_date)
            # NOTE(review): Panel and .ix are removed in modern pandas.
            adj_close = panel_data.ix['Adj Close']
            adj_close['Volumes'] = panel_data.ix['Volume']
            adj_close['Open'] = panel_data.ix['Open']
            adj_close['Close'] = panel_data.ix['Close']
            adj_close['Short_Ratio'] = self.ProcessShortedData(self.ticker)
            adj_close['Mov_avg'] = panel_data.ix['Close'].rolling(100, min_periods=1).mean()
            adj_close.to_csv(fileName)
            print("Finished Writing " + fileName)
        else:
            print("Already exists: " + fileName)
class DataProcessing:
    """Feature engineering and model training for one ticker's cached CSV.

    Builds a lag-feature frame from the price sheet written by DataGather,
    standardizes it, and fits linear-regression / SVR / Keras models to
    predict a future close price.

    NOTE(review): Python 2 only (print statements, xrange) and depends on
    the removed sklearn.cross_validation / grid_search modules.
    """
    def __init__(self, dataGathered, dataDir="processed"):
        # dataGathered: the DataGather instance whose CSV we consume.
        self.dataGathered = dataGathered
        self.data_setup(dataDir)
        self.dataDir = dataDir + "/" + dataGathered.ticker
    def data_setup(self, dataDir):
        """Create ``dataDir/<ticker>`` if needed and point self.dataDir at it."""
        if not os.path.isdir(dataDir):
            os.mkdir(dataDir)
        if not os.path.isdir(dataDir + "/" + self.dataGathered.ticker):
            os.mkdir(dataDir + "/" + self.dataGathered.ticker)
        self.dataDir = dataDir + "/" + self.dataGathered.ticker
    def data_read(self, percent=100.00):
        """Load the ticker's CSV into self.dframe; return (frame, row_count).

        NOTE(review): when percent < 100 the row count is computed as
        ``shape[0] * percent`` (not percent/100), so the argument actually
        behaves as a fraction there -- confirm the intended semantics.
        """
        fileName = self.dataGathered.dataDir + "/" + self.dataGathered.ticker + "-stock-data.csv"
        print fileName
        try:
            self.dframe = pd.read_csv(fileName)
            if (percent < 100.0):
                rows = int(self.dframe.shape[0] * percent)
                self.dframe = pd.read_csv(fileName, nrows= rows)
        except IOError:
            print "Unable to read: ", fileName
            return None
        # Forward-fill gaps (e.g. days with no short-ratio entry).
        self.dframe.fillna(method='ffill', inplace=True)
        #print "Received ", self.dframe.shape[0], " samples, for ", self.dframe.shape[1], " features "
        #print "Features: ", list(self.dframe.columns)
        return self.dframe, self.dframe.shape[0]
    def show(self):
        """Plot the 100-day moving-average column over the date axis."""
        x = pd.to_datetime(self.dframe['Date'])
        #y = self.dframe['Close']
        #y = self.dframe['Short_Ratio']
        y = self.dframe['Mov_avg']
        fig = plt.figure(figsize=(18,6))
        gr = fig.add_subplot(111)
        gr.plot(x,y,'b-o')
        #ticklabels= [num2date(i).strftime("%Y-%m-%d") for i in graph.get_xticks().tolist()]
        #graph.set_xticklabels(ticklabels)
        plt.show()
        plt.close()
    def data_process(self, query_date, bufferLength=6, metric="Close"):
        """Build scaled lag features targeting ``metric`` at ``query_date``.

        Populates: self.scaler, self.scaled_data (standardized features),
        self.label_data (target column) and self.final_row_unscaled (the
        newest feature row, used later for prediction), then snapshots the
        working frame to disk.

        NOTE(review): every "Behind<i>" column copies the same unshifted
        series -- this looks like a missing ``.shift(i)``; verify against
        the model's intended lag structure.
        """
        self.query_date = dtt.datetime.strptime(query_date, "%m-%d-%Y")
        # Business days between the query date and the end of the data range.
        self.endquerydelta = abs(np.busday_count(self.query_date, self.dataGathered.end_date))
        #print "Delta Num days: ", self.endquerydelta
        self.dframe['Date'] = pd.to_datetime(self.dframe['Date']) # convert date string to datetime type
        self.dframe['time_diff'] = (self.dframe['Date'] - self.dframe['Date'].min()) / np.timedelta64(1,'D')
        dataf = pd.DataFrame(index=self.dframe.index)
        for i in xrange(0, bufferLength + self.endquerydelta):
            dataf["Behind%s" % str(i + 1)] = self.dframe[metric]
        dataf['time_diff'] = self.dframe['time_diff']
        train_cols = ['time_diff']
        for i in xrange(0, bufferLength):
            train_cols.append("Behind%s" % str(i + 1))
        # The label is the furthest-ahead column, i.e. metric at query_date.
        label_col = 'Behind' + str(bufferLength + self.endquerydelta)
        #print label_col
        dataf.dropna(inplace=True)
        row_data = dataf[train_cols]
        label_data = dataf[label_col]
        self.scaler = preprocessing.StandardScaler().fit(row_data)
        self.scaled_data = pd.DataFrame(self.scaler.transform(row_data))
        self.label_data = label_data
        self.final_row_unscaled = row_data.tail(1)
        #
        fileName = self.dataDir + "/" + self.dataGathered.ticker + "-stock-processed-dframe.csv"
        self.dframe.to_csv(fileName)
    def debug(self, query_date, bufferLength=5):
        """Print the train/label column layout for ``query_date`` (no fitting)."""
        self.query_date = dtt.datetime.strptime(query_date, "%m-%d-%Y")
        self.dframe['Date'] = pd.to_datetime(self.dframe['Date'])
        self.dframe['time_diff'] = (self.dframe['Date'] - self.dframe['Date'].min()) / np.timedelta64(1,'D')
        self.endquerydelta = abs(np.busday_count(self.query_date, self.dataGathered.end_date))
        dataf = pd.DataFrame(index=self.dframe.index)
        for i in xrange(0, bufferLength + self.endquerydelta):
            dataf["Behind%s" % str(i + 1)] = self.dframe['Close']
        dataf['time_diff'] = self.dframe['time_diff']
        train_cols = ['time_diff']
        for i in xrange(0, bufferLength):
            train_cols.append("Behind%s" % str(i + 1))
        print train_cols
        label_col = 'Behind' + str(bufferLength + self.endquerydelta)
        print label_col + " " + str(self.endquerydelta) + " " + str(bufferLength)
    def bmark(self, predicted_val, test_val):
        """Print RMSE and MAE (both scaled by 1/100) for a prediction set."""
        #print "R^2 test score:", r2_score(predicted_val, test_val)
        RMSE = mean_squared_error(test_val, predicted_val)**0.5/100
        print "RMSE test score:", RMSE
        MAE = mean_absolute_error(test_val, predicted_val)/100
        print "MAE test score:", MAE
    def stock_linear_regression(self):
        """Grid-search a LinearRegression on the prepared features;
        stores the fitted search object in self.grid_obj."""
        lreg = LinearRegression()
        X_train, X_test, y_train, y_test = train_test_split(self.scaled_data, self.label_data, test_size=0.25, random_state=42)
        parameters = {'fit_intercept': [True, False], 'normalize': [True, False], 'copy_X': [True, False]}
        grid_obj = GridSearchCV(lreg, parameters, cv=None)
        grid_obj.fit(X_train, y_train)
        predict_train = grid_obj.predict(X_train)
        #print "train score:", r2_score(predict_train, y_train)
        predict_test = grid_obj.predict(X_test)
        #self.bmark(predict_test, y_test)
        self.grid_obj = grid_obj
    def stock_svm(self):
        """Grid-search an SVR (C, epsilon) scored by R^2;
        stores the fitted search object in self.grid_obj."""
        clf = svm.SVR()
        X_train, X_test, y_train, y_test = train_test_split(self.scaled_data, self.label_data, test_size=0.30, random_state=42)
        #clf = svm.SVC(kernel='linear', C=1).fit(X_train, y_train)
        parameters = {'C': [1, 10], 'epsilon': [0.1, 1e-2, 1e-3]}
        r2_scorer = metrics.make_scorer(metrics.r2_score)
        grid_obj = GridSearchCV(clf, param_grid=parameters, n_jobs=5, scoring=r2_scorer)
        grid_obj.fit(X_train, y_train)
        predict_train = grid_obj.predict(X_train)
        #print "best svr params", grid_obj.best_params_
        #print "train score:", r2_score(predict_train, y_train)
        predict_test = grid_obj.predict(X_test)
        #self.bmark(predict_test, y_test)
        self.grid_obj = grid_obj
    def stock_nn(self):
        """Fit a small Keras regressor (220-unit relu + dropout + linear);
        stores the trained model in self.grid_obj."""
        X_train, X_test, y_train, y_test = train_test_split(self.scaled_data.as_matrix(), self.label_data.as_matrix(), test_size=0.25, random_state=42)
        model = Sequential()
        model.add(Dense(220, activation="relu", kernel_initializer="normal", input_dim=X_train.shape[1]))
        model.add(Dropout(0.15))
        model.add(Dense(1, activation="linear", kernel_initializer="normal"))
        model.compile(loss='mse', optimizer='adam')
        model.fit(X_train, y_train, epochs=150, batch_size=25, verbose=0)
        loss_and_metrics = model.evaluate(X_test, y_test, batch_size=128)
        #self.bmark(predict_test, y_test)
        #print "loss and metrics: ", loss_and_metrics
        self.grid_obj = model
    def train_test(self, ttype):
        """Train the model named by ``ttype`` ("linear" | "svm" | "nn") and
        predict from the newest feature row; result lands in self.pvalue."""
        if (ttype == "linear"):
            self.stock_linear_regression()
            inputSeq = self.scaler.transform(self.final_row_unscaled)
            inputSeq = pd.DataFrame(inputSeq)
            self.pvalue = self.grid_obj.predict(inputSeq)[0]
        elif (ttype == "svm"):
            self.stock_svm()
            inputSeq = self.scaler.transform(self.final_row_unscaled)
            inputSeq = pd.DataFrame(inputSeq)
            self.pvalue = self.grid_obj.predict(inputSeq)[0]
        elif (ttype == "nn"):
            self.stock_nn()
            inputSeq = self.scaler.transform(self.final_row_unscaled)
            self.pvalue = self.grid_obj.predict(inputSeq)[0][0]
    def predict(self):
        """Return the last prediction produced by train_test()."""
        return self.pvalue
class BenchmarkTest:
    """Walk-forward benchmark: re-train and predict each remaining row.

    NOTE(review): Python 2 only (print statements).
    """
    def __init__(self, dataProcessed, query_date, dframe, rownum):
        # dataProcessed: a DataProcessing instance (already data_read()).
        # rownum: first row index to start benchmarking from.
        self.dataProcessed = dataProcessed
        self.query_date = dtt.datetime.strptime(query_date, "%m-%d-%Y")
        self.df = dframe
        self.total_rows = dframe.shape[0]
        self.rownum = rownum
    def process(self, reg_type):
        """For every row >= rownum: re-fit a ``reg_type`` model and record
        predicted vs. actual close plus that day's short ratio."""
        self.predict_val = []
        self.test_val = []
        self.sr =[]
        print "Row: ", self.rownum , " total: ", self.total_rows
        for i in range(self.rownum, self.total_rows):
            self.test_val.append(self.df.iloc[i]['Close'])
            # Re-format the row's date ("Y-m-d") to the "m-d-Y" form the
            # processing pipeline expects.
            qdate = dtt.datetime.strptime(self.df.iloc[i]['Date'],"%Y-%m-%d").strftime('%m-%d-%Y')
            self.dataProcessed.data_process(qdate)
            self.dataProcessed.train_test(reg_type)
            self.predict_val.append(self.dataProcessed.predict())
            self.sr.append(self.dataProcessed.dataGathered.ProcessShortDate(qdate))
    def show(self):
        """Print MAE and RMSE (both scaled by 1/100) over the benchmark run."""
        print "MAE : ", mean_absolute_error(self.test_val, self.predict_val, multioutput='uniform_average')/100
        print "RMSE : ", mean_squared_error(self.test_val, self.predict_val)**0.5/100
def usage():
    """Print command-line usage to stdout (Python 2 print statements)."""
    print sys.argv[0], " --help "
    print "\t" + "--gather/-g"
    print "\t" + "--summary/-s "
    print "\t" + "--benchmark/-b <svm/linear> <percent in float>"
def main():
gather_mode = False
summary_mode = False
benchmark_mode = False
try:
opts, args = getopt.getopt(sys.argv[1:], "f:sgb", ["help", "summary", "gather", "benchmark" ])
except getopt.GetoptError:
usage()
sys.exit(2)
if len(opts) == 0:
usage()
sys.exit(2)
for o, a in opts:
if o in ("-s", "--summary"):
summary_mode = True
break
if o in ("--gather", "-g"):
gather_mode = True
break
if o in ("--benchmark", "-b"):
benchmark_mode = True
if len(sys.argv) != 4:
usage()
sys.exit(2)
if sys.argv[2] not in ("svm", "linear"):
usage()
sys.exit(2)
try:
split_percent = float(sys.argv[3])
except:
usage()
sys.exit(2)
break
q0 = { 'ticker' : 'GOOG', 'begin' : '1-1-2017', 'end' : '5-12-2017', 'query' : '6-12-2017' , 'related' : [ 'BIDU', 'MSFT', 'AAPL', 'FB' ] }
q1 = { 'ticker' : 'AAPL', 'begin' : '1-1-2017', 'end' : '5-12-2017', 'query' : '6-12-2017' , 'related' : [ 'MSFT', 'GOOG', 'NVDA', 'ADBE' ] }
q2 = { 'ticker' : 'AMZN', 'begin' : '1-1-2017', 'end' : '5-12-2017', 'query' : '6-12-2017' , 'related' : [ 'GOOGL', 'TWX', 'CBS', 'NFLX' ] }
q3 = { 'ticker' : 'NFLX', 'begin' : '1-1-2017', 'end' : '5-12-2017', 'query' : '6-12-2017' , 'related' : [ 'AMZN', 'VIAB', 'FOXA', 'CBS' ] }
queries = [ q0, q1, q2, q3 ]
if gather_mode == True:
for q in queries:
dg = DataGather(q['ticker'], q['begin'], q['end'])
dg.GetShortedData()
dg.GetStockDataSheet()
sys.exit(0)
if benchmark_mode == True:
for q in queries:
dg = DataGather(q['ticker'], q['begin'], q['end'])
dg.GetStockDataSheet()
dp = DataProcessing(dg)
df_full, | |
Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.146602,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 3.13543,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.00363895,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.205547,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0176015,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0980865,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.15821,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.079859,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.336155,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.109483,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.06919,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.00332529,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00411419,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0311963,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0304269,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0345215,
'Execution Unit/Register Files/Runtime Dynamic': 0.0345411,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0666317,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.167282,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.1489,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00116406,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00116406,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.0010297,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000407257,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000437085,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00379491,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0105963,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0292502,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.86056,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0961689,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0993468,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.16937,
'Instruction Fetch Unit/Runtime Dynamic': 0.239157,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0531985,
'L2/Runtime Dynamic': 0.0143426,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 1.99496,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.385295,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0245178,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0245178,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.11073,
'Load Store Unit/Runtime Dynamic': 0.530726,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0604567,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.120913,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0214563,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0222407,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.115683,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0158085,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.30865,
'Memory Management Unit/Runtime Dynamic': 0.0380492,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.3006,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0087474,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00453184,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0498287,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': | |
[task], mode)
if mode in ["bus_width_modulation","loop_iteration_modulation"]:
move_to_apply.design_space_size[mode] += len(value)
else:
move_to_apply.design_space_size[block_of_interest.type + "_"+ mode] += len(value)
for block_type in ["pe", "mem", "ic"]:
if block_type == block_of_interest.type:
move_to_apply.design_space_size[block_type +"_"+"mapping"] += (len(equal_imm_blocks_present_for_migration) - 1)
else:
move_to_apply.design_space_size[block_type +"_"+"mapping"] += 0
can_improve_routing = self.can_improve_routing(ex_dp, sim_dp, block_of_interest, task)
if can_improve_routing:
move_to_apply.design_space_size["routing"] += (len(buses) - 1)
move_to_apply.design_space_size["transfer"] += (len(buses)-1)
move_to_apply.design_space_size["identity"] += 1
# pick which transformation to apply
# Variables:
# hot_blck_synced: the block bottleneck
# selected_metric: metric to focus on
# selected_krnl: the kernel to focus on
# ------------------------------
def select_transformation(self, ex_dp, sim_dp, hot_blck_synced, selected_metric, selected_krnl, sorted_metric_dir):
feasible_transformations = self.get_feasible_transformations(ex_dp, sim_dp, hot_blck_synced, selected_metric,
selected_krnl, sorted_metric_dir)
print(list(feasible_transformations))
random.seed(datetime.now().microsecond)
# pick randomly at the moment.
# TODO: possibly can do better
transformation = random.choice(list(feasible_transformations))
#if len(hot_blck_synced.get_tasks_of_block_by_dir("write")) > 1:
# transformation = "split_swap"
#else:
# transformation = "swap"
if transformation == "migrate":
batch_mode = "single"
transformation_sub_name = "irrelevant"
elif transformation == "split":
# see if any task can run in parallel
batch_mode = "batch"
transformation_sub_name = "irrelevant"
elif transformation == "split_swap":
batch_mode = "single"
transformation_sub_name = "irrelevant"
elif transformation == "transfer":
batch_mode = "irrelevant"
transformation_sub_name = "locality_improvement"
elif transformation == "routing":
batch_mode = "irrelevant"
transformation_sub_name = "routing_improvement"
else:
transformation_sub_name = "irrelevant"
batch_mode = "irrelevant"
return transformation, transformation_sub_name, batch_mode, len(list(feasible_transformations))
# calculate the cost impact of a kernel improvement
def get_swap_improvement_cost(self, sim_dp, kernels, selected_metric, dir):
def get_subtype_for_cost(block):
if block.type == "pe" and block.subtype == "ip":
return "ip"
if block.type == "pe" and block.subtype == "gpp":
if "A53" in block.instance_name or "ARM" in block.instance_name:
return "arm"
if "G3" in block.instance_name:
return "dsp"
else:
return block.type
# Figure out whether there is a mapping that improves kernels performance
def no_swap_improvement_possible(sim_dp, selected_metric, metric_dir, krnl):
hot_block = sim_dp.get_dp_stats().get_hot_block_of_krnel(krnl.get_task_name(), selected_metric)
imm_block = self.dh.get_immediate_block_multi_metric(hot_block, metric_dir, [krnl.get_task()])
blah = hot_block.get_generic_instance_name()
blah2 = imm_block.get_generic_instance_name()
return hot_block.get_generic_instance_name() == imm_block.get_generic_instance_name()
# find the cost of improvement by comparing the current and accelerated design (for the kernel)
kernel_improvement_cost = {}
kernel_name_improvement_cost = {}
for krnel in kernels:
hot_block = sim_dp.get_dp_stats().get_hot_block_of_krnel(krnel.get_task_name(), selected_metric)
hot_block_subtype = get_subtype_for_cost(hot_block)
current_cost = self.database.db_input.porting_effort[hot_block_subtype]
#if hot_block_subtype == "ip":
# print("what")
imm_block = self.dh.get_immediate_block_multi_metric(hot_block,selected_metric, metric_dir,[krnel.get_task()])
imm_block_subtype = get_subtype_for_cost(imm_block)
imm_block_cost = self.database.db_input.porting_effort[imm_block_subtype]
improvement_cost = (imm_block_cost - current_cost)
kernel_improvement_cost[krnel] = improvement_cost
# calcualte inverse so lower means worse
max_val = max(kernel_improvement_cost.values()) # multiply by
kernel_improvement_cost_inverse = {}
for k, v in kernel_improvement_cost.items():
kernel_improvement_cost_inverse[k] = max_val - kernel_improvement_cost[k]
# get sum and normalize
sum_ = sum(list(kernel_improvement_cost_inverse.values()))
for k, v in kernel_improvement_cost_inverse.items():
# normalize
if not (sum_ == 0):
kernel_improvement_cost_inverse[k] = kernel_improvement_cost_inverse[k]/sum_
kernel_improvement_cost_inverse[k] = max(kernel_improvement_cost_inverse[k], .0000001)
if no_swap_improvement_possible(sim_dp, selected_metric, dir, k):
kernel_improvement_cost_inverse[k] = .0000001
kernel_name_improvement_cost[k.get_task_name()] = kernel_improvement_cost_inverse[k]
return kernel_improvement_cost_inverse
    def get_identity_cost(self):
        # Cost of the "identity" transformation, taken from the porting-effort
        # table entry for an "ip" block.
        return self.database.db_input.porting_effort["ip"]
# calculate the cost impact of a kernel improvement
def get_swap_cost(self, sim_dp, krnl, selected_metric, sorted_metric_dir):
def get_subtype_for_cost(block):
if block.type == "pe" and block.subtype == "ip":
return "ip"
if block.type == "pe" and block.subtype == "gpp":
if "A53" in block.instance_name or "ARM" in block.instance_name:
return "arm"
if "G3" in block.instance_name:
return "dsp"
else:
return block.type
hot_block = sim_dp.get_dp_stats().get_hot_block_of_krnel(krnl.get_task_name(), selected_metric)
hot_block_subtype = get_subtype_for_cost(hot_block)
current_cost = self.database.db_input.porting_effort[hot_block_subtype]
imm_block = self.dh.get_immediate_block_multi_metric(hot_block,selected_metric, sorted_metric_dir,[krnl.get_task()])
imm_block_subtype = get_subtype_for_cost(imm_block)
imm_block_cost = self.database.db_input.porting_effort[imm_block_subtype]
improvement_cost = (imm_block_cost - current_cost)
return improvement_cost
    def get_migrate_cost(self):
        # Migration is modeled as free (no porting effort).
        return 0
    def get_transfer_cost(self):
        # Transfer transformations are modeled as free.
        return 0
    def get_routing_cost(self):
        # Routing transformations are modeled as free.
        return 0
    def get_split_cost(self):
        # Splitting has a fixed unit cost.
        return 1
def get_migration_split_cost(self, transformation):
if transformation == "migrate":
return self.get_migrate_cost()
elif transformation == "split":
return self.get_split_cost()
else:
print("this transformation" + transformation + " is not supported for cost calculation")
exit(0)
# how much does it cost to improve the kernel for different transformations
def get_krnl_improvement_cost(self, ex_dp, sim_dp, krnls, selected_metric, move_sorted_metric_dir):
# whether you can apply the transformation for the krnel's block
def get_transformation_cost(sim_dp, selected_metric, move_sorted_metric_dir, krnl, transformation):
if transformation == "swap":
cost = self.get_swap_cost(sim_dp, krnl, selected_metric, move_sorted_metric_dir)
elif transformation in ["split", "migrate"]:
cost = self.get_migration_split_cost(transformation)
elif transformation in ["split_swap"]:
cost = self.get_migration_split_cost("split")
cost += self.get_swap_cost(sim_dp, krnl, selected_metric, move_sorted_metric_dir)
elif transformation in ["identity"]:
cost = self.get_identity_cost()
elif transformation in ["transfer"]:
cost = self.get_transfer_cost()
elif transformation in ["routing"]:
cost = self.get_routing_cost()
if cost == 0:
cost = self.min_cost_to_consider
return cost
krnl_improvement_cost = {}
# iterate through the kernels, find their feasible transformations and
# find cost
for krnl in krnls:
hot_block = sim_dp.get_dp_stats().get_hot_block_of_krnel(krnl.get_task_name(), selected_metric)
imm_block = self.dh.get_immediate_block_multi_metric(hot_block, selected_metric, move_sorted_metric_dir, [krnl.get_task()])
hot_blck_synced = self.dh.find_cores_hot_kernel_blck_bottlneck(ex_dp, hot_block)
feasible_trans = self.get_feasible_transformations(ex_dp, sim_dp, hot_blck_synced, selected_metric,
krnl,move_sorted_metric_dir)
for trans in feasible_trans:
cost = get_transformation_cost(sim_dp, selected_metric, move_sorted_metric_dir, krnl, trans)
krnl_improvement_cost[(krnl, trans)] = cost
return krnl_improvement_cost
# select a metric to improve on
def select_metric(self, sim_dp):
# prioritize metrics based on their distance contribution to goal
metric_prob_dict = {} # (metric:priority value) each value is in [0 ,1] interval
for metric in config.budgetted_metrics:
metric_prob_dict[metric] = sim_dp.dp_stats.dist_to_goal_per_metric(metric, config.metric_sel_dis_mode)/\
sim_dp.dp_stats.dist_to_goal(["power", "area", "latency"],
config.metric_sel_dis_mode)
# sort the metric based on distance (and whether the sort is probabilistic or exact).
# probabilistic sorting, first sort exactly, then use the exact value as a probability of selection
metric_prob_dict_sorted = {k: v for k, v in sorted(metric_prob_dict.items(), key=lambda item: item[1])}
if config.move_metric_ranking_mode== "exact":
selected_metric = list(metric_prob_dict_sorted.keys())[len(metric_prob_dict_sorted.keys()) -1]
else:
selected_metric = self.pick_from_prob_dict(metric_prob_dict_sorted)
sorted_low_to_high_metric_dir = {}
for metric, prob in metric_prob_dict_sorted.items():
move_dir = 1 # try to increase the metric value
if not sim_dp.dp_stats.fits_budget_for_metric_for_SOC(metric, 1):
move_dir = -1 # try to reduce the metric value
sorted_low_to_high_metric_dir[metric] = move_dir
return selected_metric, metric_prob_dict_sorted, sorted_low_to_high_metric_dir
# select direction for the move
def select_dir(self, sim_dp, metric):
move_dir = 1 # try to increase the metric value
if not sim_dp.dp_stats.fits_budget_for_metric_for_SOC(metric, 1):
move_dir = -1 # try to reduce the metric value
return move_dir
def filter_in_kernels_meeting_budget(self, selected_metric, sim_dp):
krnls = sim_dp.get_dp_stats().get_kernels()
# filter the kernels whose workload already met the budget
workload_tasks = sim_dp.database.db_input.workload_tasks
task_workload = sim_dp.database.db_input.task_workload
workloads_to_consider = []
for workload in workload_tasks.keys():
if sim_dp.dp_stats.workload_fits_budget(workload, 1):
continue
workloads_to_consider.append(workload)
krnls_to_consider = []
for krnl in krnls:
if task_workload[krnl.get_task_name()] in workloads_to_consider and not krnl.get_task().is_task_dummy():
krnls_to_consider.append(krnl)
return krnls_to_consider
# get each kernels_contribution to the metric of interest
def get_kernels_s_contribution(self, selected_metric, sim_dp):
krnl_prob_dict = {} # (kernel, metric_value)
#krnls = sim_dp.get_dp_stats().get_kernels()
# filter it kernels whose workload meet the budget
krnls = self.filter_in_kernels_meeting_budget(selected_metric, sim_dp)
if krnls == []: # the design meets the budget, hence all kernels can be improved for cost improvement
krnls = sim_dp.get_dp_stats().get_kernels()
metric_total = sum([krnl.stats.get_metric(selected_metric) for krnl in krnls])
# sort kernels based on their contribution to the metric of interest
for krnl in krnls:
krnl_prob_dict[krnl] = krnl.stats.get_metric(selected_metric)/metric_total
if not "bottleneck" in self.move_s_krnel_selection:
for krnl in krnls:
krnl_prob_dict[krnl] = 1
return krnl_prob_dict
# get each_kernels_improvement_ease (ease = 1/cost)
def get_kernels_s_improvement_ease(self, ex_dp, sim_dp, selected_metric, move_sorted_metric_dir):
krnls = sim_dp.get_dp_stats().get_kernels()
krnl_improvement_ease = {}
if not "improvement_ease" in self.move_s_krnel_selection:
for krnl in krnls:
krnl_improvement_ease[krnl] = 1
else:
krnl_trans_improvement_cost = self.get_krnl_improvement_cost(ex_dp, sim_dp, krnls, selected_metric, move_sorted_metric_dir)
# normalize
# normalized and reverse (we need to reverse, so higher cost is worse, i.e., smaller)
krnl_trans_improvement_ease = {}
for krnl_trans, cost in krnl_trans_improvement_cost.items():
krnl_trans_improvement_ease[krnl_trans] = 1 / (cost)
max_ease = max(krnl_trans_improvement_ease.values())
for krnl_trans, ease in krnl_trans_improvement_ease.items():
krnl_trans_improvement_ease[krnl_trans] = ease / max_ease
for krnl in krnls:
krnl_improvement_ease[krnl] = 0
for krnl_trans, ease in krnl_trans_improvement_ease.items():
krnl, trans = krnl_trans
krnl_improvement_ease[krnl] = max(ease, krnl_improvement_ease[krnl])
return krnl_improvement_ease
# select the kernel for the move
def select_kernel(self, ex_dp, sim_dp, selected_metric, move_sorted_metric_dir):
# get each kernel's contributions
krnl_contribution_dict = self.get_kernels_s_contribution(selected_metric, sim_dp)
# get each kernel's improvement cost
krnl_improvement_ease = self.get_kernels_s_improvement_ease(ex_dp, sim_dp, selected_metric, move_sorted_metric_dir)
# combine the selections methods
# multiply the probabilities for a more complex metric
krnl_prob_dict = {}
for krnl in krnl_contribution_dict.keys():
krnl_prob_dict[krnl] = krnl_contribution_dict[krnl] * krnl_improvement_ease[krnl]
# give zero probablity to the krnls that you filtered out
for krnl in sim_dp.get_dp_stats().get_kernels():
if krnl not in krnl_prob_dict.keys():
krnl_prob_dict[krnl] = 0
# sort
#krnl_prob_dict_sorted = {k: v for k, v in sorted(krnl_prob_dict.items(), key=lambda item: item[1])}
krnl_prob_dict_sorted = sorted(krnl_prob_dict.items(), key=lambda item: item[1], reverse=True)
# get the worse kernel
if config.move_krnel_ranking_mode == "exact": # for area to allow us pick scenarios that are not necessarily the worst
#selected_krnl = list(krnl_prob_dict_sorted.keys())[
# len(krnl_prob_dict_sorted.keys()) - 1 - self.krnel_rnk_to_consider]
for krnl, prob in krnl_prob_dict_sorted:
if krnl.get_task_name() in self.krnels_not_to_consider:
continue
selected_krnl = krnl
break
else:
| |
# <gh_stars>0  (scraper artifact, not valid Python — commented out)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# GUI module generated by PAGE version 4.20
# in conjunction with Tcl version 8.6
# Feb 01, 2019 12:08:48 PM MST platform: Windows NT
###############################################################################
###############################################################################
#need to add in
import MODOAnalysis
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.backends.backend_tkagg as tkagg
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from tkinter.filedialog import askopenfilename
from tkinter.filedialog import asksaveasfilename
from matplotlib.figure import Figure
import matplotlib.transforms
import itertools
#from pykrige.uk import UniversalKriging
from matplotlib.mlab import griddata
from matplotlib import cm
from scipy import stats
import seaborn as sns
np.random.seed(1234)
import warnings
warnings.filterwarnings("ignore")
#from tkinter import *
###############################################################################
###############################################################################
import sys
try:
import Tkinter as tk
except ImportError:
import tkinter as tk
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
import GUI_support
from tkinter import *
def vp_start_gui():
    '''Starting point when module is the main routine.

    Creates the Tk root window, initializes the PAGE-generated support
    module, builds the Toplevel1 UI, and enters the Tk main loop
    (blocks until the window is closed).
    '''
    global val, w, root
    root = tk.Tk()
    # root.resizable(False, False)
    root.resizable(True, True)
    GUI_support.set_Tk_var()
    top = Toplevel1 (root)
    GUI_support.init(root, top)
    root.mainloop()
w = None  # module-level handle to the Toplevel created by create_Toplevel1
def create_Toplevel1(root, *args, **kwargs):
    '''Starting point when module is imported by another program.

    Builds the UI inside a new Toplevel attached to the caller's root
    window and returns the pair (toplevel window, Toplevel1 instance).
    '''
    global w, w_win, rt
    rt = root
    w = tk.Toplevel (root)
    GUI_support.set_Tk_var()
    top = Toplevel1 (w)
    GUI_support.init(w, top, *args, **kwargs)
    return (w, top)
def destroy_Toplevel1():
    """Tear down the Toplevel window and terminate the process.

    BUGFIX: the original called sys.exit('ladskflsdg'), which printed a
    leftover gibberish debug string to stderr and exited with status 1;
    exit cleanly instead.
    """
    global w
    w.quit()
    w.destroy()
    sys.exit()
w = None  # reset the module-level window handle after teardown
class Toplevel1:
def __init__(self, top=None):
#gotta initialize a bunch of vars so I can check if they exist later
self.canvas=None
self.binspin_fig=None
self.artificial_binspin_fig=None
self.artificial_rose_ax=None
self.artifical_bin_canvas=None
self.ax=None
self.output_df=None
self.previewcanvas=None
self.preview_fig=None
self.preview_ax=None
self.min_x=None
self.min_y=None
self.max_x=None
self.max_y=None
self.min_gx=None
self.min_gy=None
self.max_gx=None
self.max_gy=None
self.grid_xy=None
self.dx=None
self.dx=None
self.artificial_fig=None
self.artifical_canvas=None
self.artificial_ax=None
################################################
################################################
self.mult_cont_fig=None
self.mult_cont_ax=None
self.mult_cont_canvas=None
self.preview_toolbar=None
self.mult_slide=False
self.beta_slide=False
self.mult_toolbar=None
self.sample=None
self.contour=None
################################################
################################################
self.mult_point_canvas=None
self.mult_point_fig=None
self.mult_point_toolbar=None
self.mult_point_marker=1
################################################
################################################
self.beta_point_canvas=None
self.beta_point_fig=None
self.beta_point_toolbar=None
self.beta_point_marker=1
################################################
################################################
self.beta_cont_fig=None
self.y_num=None
self.cont_grid_xy = None
self.cont_grid_x=None
self.cont_grid_y=None
self.mult_zi = None
self.mult_strike_marker=1
self.beta_strike_marker=1
self.beta_toolbar=None
self.beta_cont_canvas=None
self.beta_contour=None
self.beta_zi=None
self.hist_fig=None
self.hist_ax=None
self.beta_mult_fig=None
self.beta_mult_ax=None
self.beta_mult_canvas=None
self.uniform_df=None
self.hist_canvas=None
self.init=False
self.hist_toolbar=None
self.corr_toolbar=None
self.beta_mult_toolbar=None
self.rose_canvas=None
self.fdense_fig=None
self.fdense_zi=None
self.fdense_contour=None
self.fdense_cbar=None
self.fdense_canvas=None
self.fdense_ax=None
self.log_fdense_fig=None
self.log_fdense_zi=None
self.log_fdense_contour=None
self.log_fdense_cbar=None
self.log_fdense_canvas=None
self.log_fdense_ax=None
self.uniform_mult_fig=None
self.uniform_mult_zi=None
self.uniform_mult_contour=None
self.uniform_mult_cbar=None
self.uniform_mult_canvas=None
self.uniform_mult_ax=None
self.uniform_beta_fig=None
self.uniform_beta_zi=None
self.uniform_beta_contour=None
self.uniform_beta_cbar=None
self.uniform_beta_canvas=None
self.uniform_beta_ax=None
self.frac_mult_fig=None
self.frac_mult_zi=None
self.frac_mult_contour=None
self.frac_mult_cbar=None
self.frac_mult_canvas=None
self.frac_mult_ax=None
self.frac_beta_fig=None
self.frac_beta_zi=None
self.frac_beta_contour=None
self.frac_beta_cbar=None
self.frac_beta_canvas=None
self.frac_beta_ax=None
self.c1=None
self.c2=None
self.c3=None
self.c4=None
self.c5=None
self.c6=None
self.random_sample=None
self.cmaps = ['viridis', 'plasma', 'inferno', 'magma', 'Greys', 'Purples',
'Blues', 'Greens', 'Oranges', 'Reds', 'YlOrBr', 'YlOrRd',
'OrRd', 'PuRd', 'RdPu', 'BuPu', 'GnBu', 'PuBu', 'YlGnBu',
'PuBuGn', 'BuGn', 'YlGn','binary', 'gist_yarg',
'gist_gray', 'gray', 'bone', 'pink','spring', 'summer',
'autumn', 'winter', 'cool', 'Wistia','hot', 'afmhot',
'gist_heat', 'copper','PiYG', 'PRGn', 'BrBG',
'PuOr', 'RdGy', 'RdBu', 'RdYlBu', 'RdYlGn', 'Spectral',
'coolwarm', 'bwr', 'Pastel1', 'Pastel2', 'Paired', 'Accent',
'Dark2', 'Set1', 'Set2', 'Set3', 'tab10', 'tab20', 'tab20b',
'tab20c', 'flag', 'prism', 'ocean', 'gist_earth', 'terrain',
'gist_stern','gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix',
'brg', 'hsv','gist_rainbow', 'rainbow', 'jet',
'nipy_spectral', 'gist_ncar']
'''This class configures and populates the toplevel window.
top is the toplevel containing window.'''
_bgcolor = '#d9d9d9' # X11 color: 'gray85'
_fgcolor = '#000000' # X11 color: 'black'
_compcolor = '#d9d9d9' # X11 color: 'gray85'
_ana1color = '#d9d9d9' # X11 color: 'gray85'
_ana2color = '#ececec' # Closest X11 color: 'gray92'
font10 = "-family {Courier New} -size 10 -weight normal -slant" \
" roman -underline 0 -overstrike 0"
font11 = "-family {Segoe UI} -size 8 -weight normal -slant " \
"roman -underline 0 -overstrike 0"
font12 = "-family {Segoe UI} -size 12 -weight bold -slant " \
"roman -underline 0 -overstrike 0"
font13 = "-family {Segoe UI} -size 14 -weight normal -slant " \
"roman -underline 0 -overstrike 0"
font14 = "-family {Segoe UI} -size 7 -weight normal -slant " \
"roman -underline 0 -overstrike 0"
font17="-family {Segoe UI} -size 12 -weight bold -slant " \
"roman -underline 0 -overstrike 0"
self.font15 = "-family {Segoe UI} -size 12 -weight bold -slant " \
"roman -underline 0 -overstrike 0"
self.style = ttk.Style()
if sys.platform == "win32":
self.style.theme_use('winnative')
self.style.configure('.',background=_bgcolor)
self.style.configure('.',foreground=_fgcolor)
self.style.configure('.',font="TkDefaultFont")
self.style.map('.',background=
[('selected', _compcolor), ('active',_ana2color)])
top.geometry("1049x700+543+128")
top.title("MODO ko MODO")
top.configure(background="#52d883")
top.configure(highlightbackground="#d9d9d9")
top.configure(highlightcolor="black")
self.style.configure('TNotebook.Tab', background=_bgcolor)
self.style.configure('TNotebook.Tab', foreground=_fgcolor)
self.style.map('TNotebook.Tab', background=
[('selected', _compcolor), ('active',_ana2color)])
self.TNotebook1 = ttk.Notebook(top)
self.TNotebook1.place(relx=0.01, rely=0.015, relheight=0.953
, relwidth=0.976)
self.TNotebook1.configure(width=1024)
# self.TNotebook1.configure(height=1024)
self.TNotebook1.configure(takefocus="")
self.TNotebook1_t0 = tk.Frame(self.TNotebook1)
self.TNotebook1.add(self.TNotebook1_t0, padding=3)
self.TNotebook1.tab(0, text="Main",compound="left",underline="-1",)
self.TNotebook1_t0.configure(background="#86bad8")
self.TNotebook1_t0.configure(highlightbackground="#d9d9d9")
self.TNotebook1_t0.configure(highlightcolor="black")
self.TNotebook1_t1 = tk.Frame(self.TNotebook1)
self.TNotebook1.add(self.TNotebook1_t1, padding=3)
self.TNotebook1.tab(1, text="Contour Plots", compound="left"
,underline="-1", )
self.TNotebook1_t1.configure(background="#86bad8")
self.TNotebook1_t1.configure(highlightbackground="#d9d9d9")
self.TNotebook1_t1.configure(highlightcolor="black")
###############################################################################
# Want to get rid of this tab cuz not using it for now
###############################################################################
self.TNotebook1_t2 = tk.Frame(self.TNotebook1)
self.TNotebook1.add(self.TNotebook1_t2, padding=3)
self.TNotebook1.tab(2, text="Colored Point Plots", compound="none", underline="-1"
,)
self.TNotebook1_t2.configure(background="#86bad8")
self.TNotebook1_t2.configure(highlightbackground="#d9d9d9")
self.TNotebook1_t2.configure(highlightcolor="black")
self.TNotebook1_t3 = tk.Frame(self.TNotebook1)
self.TNotebook1.add(self.TNotebook1_t3, padding=3)
self.TNotebook1.tab(3, text="Sample Plots", compound="none"
,underline="-1", )
self.TNotebook1_t3.configure(background="#86bad8")
self.TNotebook1_t3.configure(highlightbackground="#d9d9d9")
self.TNotebook1_t3.configure(highlightcolor="black")
self.TNotebook1_t4 = tk.Frame(self.TNotebook1)
self.TNotebook1.add(self.TNotebook1_t4, padding=3)
self.TNotebook1.tab(4, text="Summary Stats", compound="none"
,underline="-1", )
self.TNotebook1_t4.configure(background="#86bad8")
self.TNotebook1_t4.configure(highlightbackground="#d9d9d9")
self.TNotebook1_t4.configure(highlightcolor="black")
self.TNotebook1_t5 = tk.Frame(self.TNotebook1)
self.TNotebook1.add(self.TNotebook1_t5, padding=3)
self.TNotebook1.tab(5, text="Settings",compound="none",underline="-1",)
self.TNotebook1_t5.configure(background="#d9d9d9")
self.TNotebook1_t5.configure(highlightbackground="#d9d9d9")
self.TNotebook1_t5.configure(highlightcolor="black")
self.TNotebook1_t6 = tk.Frame(self.TNotebook1)
self.TNotebook1.add(self.TNotebook1_t6, padding=3)
self.TNotebook1.tab(6, text="Artificial Data",compound="none",underline="-1",)
self.TNotebook1_t6.configure(background="#86bad8")
self.TNotebook1_t6.configure(highlightbackground="#d9d9d9")
self.TNotebook1_t6.configure(highlightcolor="black")
###############################################################################
###############################################################################
#make sure this has command updateValue
self.s1 = tk.Scale(self.TNotebook1_t0, from_=10.0, to=500.0, command=self.updateValue)
###############################################################################
###############################################################################
self.s1.place(relx=0.02, rely=0.25, relwidth=0.104, relheight=0.0
, height=59, bordermode='ignore')
self.s1.configure(activebackground="#ececec")
self.s1.configure(background="#d9d9d9")
self.s1.configure(font="TkTextFont")
self.s1.configure(foreground="#000000")
self.s1.configure(highlightbackground="#d9d9d9")
self.s1.configure(highlightcolor="black")
self.s1.configure(label="X Grid Nodes")
self.s1.configure(orient="horizontal")
self.s1.configure(takefocus="0")
self.s1.configure(troughcolor="#f2f2f2")
###############################################################################
###############################################################################
#make sure this has command keep original
self.s2 = tk.Scale(self.TNotebook1_t0, from_=0.0, to=500.0, command=self.keep_original)
###############################################################################
###############################################################################
self.s2.place(relx=0.02, rely=0.342, relwidth=0.104, relheight=0.0
, height=59, bordermode='ignore')
self.s2.configure(activebackground="#ececec")
self.s2.configure(background="#d9d9d9")
self.s2.configure(font="TkTextFont")
self.s2.configure(foreground="#000000")
self.s2.configure(highlightbackground="#d9d9d9")
self.s2.configure(highlightcolor="black")
self.s2.configure(label="Y Grid Nodes")
self.s2.configure(orient="horizontal")
self.s2.configure(takefocus="0")
self.s2.configure(troughcolor="#afafaf")
self.s2.set(10)
###############################################################################
###############################################################################
#addded a scale here to show number of grid node
#and a fram to put the tool bar in
self.s3 = tk.Scale(self.TNotebook1_t0, from_=0.0,
to=250000.0, command=self.keep_original)
self.tab0_frame = tk.Frame(self.TNotebook1_t0)
###############################################################################
###############################################################################
self.s3.place(relx=0.02, rely=0.433, relwidth=0.104, relheight=0.0
, height=59, bordermode='ignore')
self.s3.configure(activebackground="#ececec")
self.s3.configure(background="#d9d9d9")
self.s3.configure(font="TkTextFont")
self.s3.configure(foreground="#000000")
self.s3.configure(highlightbackground="#d9d9d9")
self.s3.configure(highlightcolor="black")
self.s3.configure(label="Total Grid Nodes")
self.s3.configure(orient="horizontal")
self.s3.configure(takefocus="0")
self.s3.configure(troughcolor="#afafaf")
self.s3.set(100)
self.TSeparator1 = ttk.Separator(self.TNotebook1_t0)
self.TSeparator1.place(relx=0.0, rely=0.033, relwidth=0.147)
self.TSeparator1.configure(takefocus="0")
self.TSeparator2 = ttk.Separator(self.TNotebook1_t0)
self.TSeparator2.place(relx=0.147, rely=0.033, relheight=0.517)
self.TSeparator2.configure(orient="vertical")
self.TSeparator2.configure(takefocus="0")
self.TSeparator2 = ttk.Separator(self.TNotebook1_t0)
self.TSeparator2.place(relx=-0.005, rely=0.033, relheight=0.517)
self.TSeparator2.configure(orient="vertical")
self.TSeparator2.configure(takefocus="0")
self.TSeparator1 = ttk.Separator(self.TNotebook1_t0)
self.TSeparator1.place(relx=-0.01, rely=0.55, relwidth=0.157)
self.TSeparator1.configure(takefocus="0")
self.Label1_1 = tk.Label(self.TNotebook1_t0)
self.Label1_1.place(relx=0.005, rely=0.042, height=33, width=142)
self.Label1_1.configure(activebackground="#f9f9f9")
self.Label1_1.configure(activeforeground="black")
self.Label1_1.configure(background="#c1c1c1")
self.Label1_1.configure(disabledforeground="#a3a3a3")
self.Label1_1.configure(font=font12)
self.Label1_1.configure(foreground="#000000")
self.Label1_1.configure(highlightbackground="#d9d9d9")
self.Label1_1.configure(highlightcolor="black")
self.Label1_1.configure(text='''Grid Parameters''')
self.Average_Dist_Label = tk.Label(self.TNotebook1_t0,anchor='w')
self.Average_Dist_Label.place(relx=0.65, rely=0.95, height=33, width=350)
self.Average_Dist_Label.configure(activebackground="#f9f9f9")
self.Average_Dist_Label.configure(activeforeground="black")
self.Average_Dist_Label.configure(background="#86bad8")
self.Average_Dist_Label.configure(disabledforeground="#a3a3a3")
self.Average_Dist_Label.configure(foreground="#000000")
self.Average_Dist_Label.configure(highlightbackground="#d9d9d9")
self.Average_Dist_Label.configure(highlightcolor="black")
self.Average_Dist_Label.configure(font=self.font15)
self.Average_Dist_Label.configure(text='''Average Distance to nth Fracture:''')
self.FrontPearsonLabel = tk.Label(self.TNotebook1_t0,anchor='w')
self.FrontPearsonLabel.place(relx=0.65, rely=0.91, height=33, width=350)
self.FrontPearsonLabel.configure(activebackground="#f9f9f9")
self.FrontPearsonLabel.configure(activeforeground="black")
self.FrontPearsonLabel.configure(background="#86bad8")
self.FrontPearsonLabel.configure(disabledforeground="#a3a3a3")
self.FrontPearsonLabel.configure(foreground="#000000")
self.FrontPearsonLabel.configure(highlightbackground="#d9d9d9")
self.FrontPearsonLabel.configure(highlightcolor="black")
self.FrontPearsonLabel.configure(font=self.font15)
self.FrontPearsonLabel.configure(text='''Multinomial vs. Beta Pearson R:''')
self.TSeparator1 = ttk.Separator(self.TNotebook1_t0)
self.TSeparator1.place(relx=-0.01, rely=0.1, relwidth=0.157)
self.TSeparator1.configure(takefocus="0")
self.TSeparator1 = ttk.Separator(self.TNotebook1_t0)
self.TSeparator1.place(relx=-0.01, rely=0.225, relwidth=0.157)
self.TSeparator1.configure(takefocus="0")
self.Label2 = tk.Label(self.TNotebook1_t0)
self.Label2.place(relx=0.0, rely=0.108, height=71, width=144)
self.Label2.configure(activebackground="#f9f9f9")
self.Label2.configure(activeforeground="black")
self.Label2.configure(background="#d9d9d9")
self.Label2.configure(disabledforeground="#a3a3a3")
self.Label2.configure(font=font11)
self.Label2.configure(foreground="#000000")
self.Label2.configure(highlightbackground="#d9d9d9")
self.Label2.configure(highlightcolor="black")
self.Label2.configure(text='''Note: Only need to change the x value. The rest will update to keep aspect ratio.''')
self.Label2.configure(wraplength="125")
self.Label1_2 = tk.Label(self.TNotebook1_t0)
self.Label1_2.place(relx=0.157, rely=0.042, height=43, width=142)
self.Label1_2.configure(activebackground="#f9f9f9")
self.Label1_2.configure(activeforeground="black")
self.Label1_2.configure(background="#c1c1c1")
self.Label1_2.configure(disabledforeground="#a3a3a3")
self.Label1_2.configure(font=font12)
self.Label1_2.configure(foreground="#000000")
self.Label1_2.configure(highlightbackground="#d9d9d9")
self.Label1_2.configure(highlightcolor="black")
self.Label1_2.configure(text='''Multinomial Parameters''')
self.Label1_2.configure(wraplength="150")
self.TSeparator1 = ttk.Separator(self.TNotebook1_t0)
self.TSeparator1.place(relx=0.147, rely=0.55, relwidth=0.157)
self.TSeparator1.configure(takefocus="0")
self.TSeparator1 = ttk.Separator(self.TNotebook1_t0)
self.TSeparator1.place(relx=0.147, rely=0.033, relwidth=0.157)
self.TSeparator1.configure(takefocus="0")
self.TSeparator2 = ttk.Separator(self.TNotebook1_t0)
self.TSeparator2.place(relx=0.304, rely=0.033, relheight=0.617)
self.TSeparator2.configure(orient="vertical")
self.TSeparator2.configure(takefocus="0")
self.TSeparator1 = ttk.Separator(self.TNotebook1_t0)
self.TSeparator1.place(relx=0.147, rely=0.133, relwidth=0.157)
self.TSeparator1.configure(takefocus="0")
self.neighbors_val=IntVar(self.TNotebook1_t0)
self.neighbors_val.set(12)
self.neighbors = ttk.Combobox(self.TNotebook1_t0)
self.neighbors.place(relx=0.25, rely=0.133, relheight=0.035
, relwidth=0.042)
self.value_list = [10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,]
self.neighbors.configure(values=self.value_list)
self.neighbors.configure(textvariable=self.neighbors_val)
self.neighbors.configure(takefocus="")
self.Label1_3 = tk.Label(self.TNotebook1_t0)
self.Label1_3.place(relx=0.152, rely=0.133, height=21, width=99)
self.Label1_3.configure(activebackground="#f9f9f9")
self.Label1_3.configure(activeforeground="black")
self.Label1_3.configure(background="#86bad8")
self.Label1_3.configure(disabledforeground="#a3a3a3")
self.Label1_3.configure(foreground="#000000")
self.Label1_3.configure(highlightbackground="#d9d9d9")
self.Label1_3.configure(highlightcolor="black")
self.Label1_3.configure(text='''Neighbors to Avg''')
#######################################################################################
# Changing label to cuttof checkbox
######################################################################################
# self.Label1_4 = tk.Label(self.TNotebook1_t0)
# self.Label1_4.place(relx=0.157, rely=0.183, height=21, width=49)
# self.Label1_4.configure(activebackground="#86bad8")
# self.Label1_4.configure(activeforeground="black")
# self.Label1_4.configure(background="#86bad8")
# self.Label1_4.configure(disabledforeground="#a3a3a3")
# self.Label1_4.configure(foreground="#000000")
# self.Label1_4.configure(highlightbackground="#d9d9d9")
# self.Label1_4.configure(highlightcolor="black")
# self.Label1_4.configure(justify='left')
# self.Label1_4.configure(text='''Cuttoff r''')
self.cuttoff=IntVar(self.TNotebook1_t0)
self.Cuttoff_Check_Box = tk.Checkbutton(self.TNotebook1_t0)
self.Cuttoff_Check_Box.place(relx=0.15, rely=0.19, height=21, width=80)
self.Cuttoff_Check_Box.configure(activebackground="#86bad8")
self.Cuttoff_Check_Box.configure(activeforeground="#000000")
self.Cuttoff_Check_Box.configure(background="#86bad8")
self.Cuttoff_Check_Box.configure(disabledforeground="#86bad8")
self.Cuttoff_Check_Box.configure(foreground="#000000")
self.Cuttoff_Check_Box.configure(highlightbackground="#86bad8")
self.Cuttoff_Check_Box.configure(highlightcolor="#000000")
self.Cuttoff_Check_Box.configure(justify='left')
self.Cuttoff_Check_Box.configure(takefocus="0")
self.Cuttoff_Check_Box.configure(text='''Cuttoff r''')
self.Cuttoff_Check_Box.configure(variable=self.cuttoff)
# self.cuttoff_r=DoubleVar(self.TNotebook1_t0)
self.cuttoff_r = tk.Entry(self.TNotebook1_t0)
self.cuttoff_r.place(relx=0.246, rely=0.192,height=20, relwidth=0.052)
self.cuttoff_r.configure(background="white")
self.cuttoff_r.configure(disabledforeground="#a3a3a3")
self.cuttoff_r.configure(font="TkFixedFont")
self.cuttoff_r.configure(foreground="#000000")
self.cuttoff_r.configure(highlightbackground="#d9d9d9")
self.cuttoff_r.configure(highlightcolor="black")
self.cuttoff_r.configure(insertbackground="black")
self.cuttoff_r.configure(selectbackground="#c4c4c4")
self.cuttoff_r.configure(selectforeground="black")
self.cuttoff_r.configure(takefocus="0")
# self.cuttoff_r.configure(variable=self.cuttoff_r)
self.TSeparator2 = ttk.Separator(self.TNotebook1_t0)
self.TSeparator2.place(relx=0.461, rely=0.033, relheight=0.967)
self.TSeparator2.configure(orient="vertical")
self.TSeparator2.configure(takefocus="0")
self.TSeparator1 = ttk.Separator(self.TNotebook1_t0)
self.TSeparator1.place(relx=0.304, rely=0.65, relwidth=0.157)
self.TSeparator1.configure(takefocus="0")
self.TSeparator1 = ttk.Separator(self.TNotebook1_t0)
self.TSeparator1.place(relx=0.304, rely=0.033, relwidth=0.157)
self.TSeparator1.configure(takefocus="0")
self.TSeparator1 = ttk.Separator(self.TNotebook1_t0)
self.TSeparator1.place(relx=0.147, rely=0.225, relwidth=0.157)
self.TSeparator1.configure(takefocus="0")
self.Label1_8 = tk.Label(self.TNotebook1_t0)
self.Label1_8.place(relx=0.157, rely=0.242, height=33, width=142)
self.Label1_8.configure(activebackground="#f9f9f9")
self.Label1_8.configure(activeforeground="black")
self.Label1_8.configure(background="#c1c1c1")
self.Label1_8.configure(disabledforeground="#a3a3a3")
self.Label1_8.configure(font=font12)
self.Label1_8.configure(foreground="#000000")
self.Label1_8.configure(highlightbackground="#d9d9d9")
self.Label1_8.configure(highlightcolor="black")
self.Label1_8.configure(text='''Beta Parameters''')
self.TSeparator1 = ttk.Separator(self.TNotebook1_t0)
self.TSeparator1.place(relx=0.147, rely=0.308, relwidth=0.157)
self.TSeparator1.configure(takefocus="0")
self.theta_val=DoubleVar(self.TNotebook1_t0)
self.theta_val.set(4)
self.Theta = tk.Entry(self.TNotebook1_t0)
self.Theta.place(relx=0.216, rely=0.5,height=20, relwidth=0.082)
self.Theta.configure(background="white")
self.Theta.configure(disabledforeground="#a3a3a3")
self.Theta.configure(font="TkFixedFont")
self.Theta.configure(foreground="#000000")
self.Theta.configure(highlightbackground="#d9d9d9")
self.Theta.configure(highlightcolor="black")
self.Theta.configure(insertbackground="black")
self.Theta.configure(selectbackground="#c4c4c4")
self.Theta.configure(selectforeground="black")
self.Theta.configure(takefocus="0")
self.Theta.configure(textvariable=self.theta_val)
self.Label1_11 = tk.Label(self.TNotebook1_t0)
self.Label1_11.place(relx=0.152, rely=0.5, height=21, width=39)
self.Label1_11.configure(activebackground="#f9f9f9")
self.Label1_11.configure(activeforeground="black")
self.Label1_11.configure(background="#86bad8")
self.Label1_11.configure(disabledforeground="#a3a3a3")
self.Label1_11.configure(foreground="#000000")
self.Label1_11.configure(highlightbackground="#d9d9d9")
self.Label1_11.configure(highlightcolor="black")
self.Label1_11.configure(justify='left')
self.Label1_11.configure(text='''Theta''')
self.Label1_10 = tk.Label(self.TNotebook1_t0)
self.Label1_10.place(relx=0.15, rely=0.3, height=45, width=100)
self.Label1_10.configure(activebackground="#f9f9f9")
self.Label1_10.configure(activeforeground="black")
self.Label1_10.configure(background="#86bad8")
self.Label1_10.configure(disabledforeground="#a3a3a3")
self.Label1_10.configure(foreground="#000000")
self.Label1_10.configure(highlightbackground="#d9d9d9")
self.Label1_10.configure(highlightcolor="black")
self.Label1_10.configure(justify='left')
self.Label1_10.configure(text='''Dominant Fracture \n Set Median''')
self.Label1_10.config(font=('Arial',8))
self.Label1_10.lower(belowThis=None)
self.Label1_9 = tk.Label(self.TNotebook1_t0)
self.Label1_9.place(relx=0.152, rely=0.383, height=20, width=99)
self.Label1_9.configure(activebackground="#f9f9f9")
self.Label1_9.configure(activeforeground="black")
self.Label1_9.configure(background="#86bad8")
self.Label1_9.configure(disabledforeground="#a3a3a3")
self.Label1_9.configure(foreground="#000000")
self.Label1_9.configure(highlightbackground="#d9d9d9")
self.Label1_9.configure(highlightcolor="black")
self.Label1_9.configure(justify='left')
self.Label1_9.configure(text='''Spread (1 sigma)''')
self.bulk_sigma_val=DoubleVar(self.TNotebook1_t0)
self.bulk_sigma = tk.Entry(self.TNotebook1_t0)
self.bulk_sigma.place(relx=0.245, rely=0.383,height=20, relwidth=0.053)
self.bulk_sigma.configure(background="white")
self.bulk_sigma.configure(disabledforeground="#a3a3a3")
self.bulk_sigma.configure(font="TkFixedFont")
self.bulk_sigma.configure(foreground="#000000")
self.bulk_sigma.configure(highlightbackground="#d9d9d9")
self.bulk_sigma.configure(highlightcolor="black")
self.bulk_sigma.configure(insertbackground="black")
self.bulk_sigma.configure(selectbackground="#c4c4c4")
self.bulk_sigma.configure(selectforeground="black")
self.bulk_sigma.configure(takefocus="0")
self.bulk_sigma.configure(textvariable=self.bulk_sigma_val)
#####################################################################################
self.bulk_mean_val=tk.StringVar(self.TNotebook1_t0, value=15)
self.bulk_mean = tk.Entry(self.TNotebook1_t0,textvariable=self.bulk_mean_val)
self.bulk_mean.place(relx=0.245, rely=0.317,height=20, relwidth=0.053)
self.bulk_mean.configure(background="white")
self.bulk_mean.configure(disabledforeground="#a3a3a3")
self.bulk_mean.configure(font="TkFixedFont")
self.bulk_mean.configure(foreground="#000000")
self.bulk_mean.configure(highlightbackground="#d9d9d9")
self.bulk_mean.configure(highlightcolor="black")
self.bulk_mean.configure(insertbackground="black")
self.bulk_mean.configure(selectbackground="#c4c4c4")
self.bulk_mean.configure(selectforeground="black")
self.bulk_mean.configure(takefocus="0")
self.Label1_12 = tk.Label(self.TNotebook1_t0)
self.Label1_12.place(relx=0.152, | |
'''
@Author: ConghaoWong
@Date: 2019-12-20 09:39:02
LastEditors: <NAME>
LastEditTime: 2020-09-16 16:46:14
@Description: file content
'''
import os
import random
import cv2
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from helpmethods import (calculate_ADE_FDE_numpy, dir_check,
predict_linear_for_person)
from sceneFeature import TrajectoryMapManager
# Reproducibility switches: when USE_SEED is True, every random sampling call
# below seeds Python's RNG with SEED, so train/test splits repeat across runs.
USE_SEED = True
SEED = 10
def prepare_rotate_matrix(min_angel=1, save_path='./rotate_matrix.npy', load=False):
    """
    Build (and cache on disk) 2D rotation matrices in `min_angel`-degree steps.

    The result has shape [360 // min_angel, 2, 2]; entry i is the matrix that
    rotates a row vector by i * min_angel degrees (counter-clockwise) via
    right-multiplication. A cached file at `save_path` is reused when its
    first dimension matches the requested resolution; otherwise the matrices
    are recomputed and re-saved.

    :param min_angel: angular step in degrees (name kept for compatibility)
    :param save_path: where the .npy cache lives
    :param load: if True, return the matrix array; otherwise return None
    """
    rotate_matrix = None

    # Try the on-disk cache first.
    if os.path.exists(save_path):
        cached = np.load(save_path)
        if cached.shape[0] == 360 // min_angel:
            rotate_matrix = cached

    if rotate_matrix is None:
        # Recompute: one [[cos, -sin], [sin, cos]] matrix per angle step.
        angles = np.arange(0, 2 * np.pi, min_angel * np.pi / 180)
        sin_vals = np.sin(angles)
        cos_vals = np.cos(angles)
        rotate_matrix = np.empty((angles.shape[0], 2, 2))
        rotate_matrix[..., 0, 0] = cos_vals
        rotate_matrix[..., 0, 1] = -sin_vals
        rotate_matrix[..., 1, 0] = sin_vals
        rotate_matrix[..., 1, 1] = cos_vals
        np.save(save_path, rotate_matrix)

    if load:
        return rotate_matrix
rotate_matrix = prepare_rotate_matrix(min_angel=1, load=True)
class DataManager():
    """
    Manages the training and testing trajectory data of all datasets.

    Loads raw csv trajectories, caches them as npz files, wraps each dataset
    in a `DatasetManager`, and samples (optionally augmented via reversing,
    noise, or rotation) `Agent_Part` instances for training and testing.
    """
    def __init__(self, args, save=True):
        # `args` must provide: obs_frames, pred_frames, step, init_position,
        # log_dir, model_name, test_set, train_type, train_percent, reverse,
        # add_noise, rotate, calculate_social, normalization, train_base.
        self.args = args
        self.obs_frames = args.obs_frames
        self.pred_frames = args.pred_frames
        self.total_frames = self.pred_frames + self.obs_frames
        self.step = args.step

        # `init_position` is a sentinel coordinate meaning "agent absent".
        self.init_position = np.array([args.init_position, args.init_position])
        self.god_past_traj = np.stack([self.init_position for _ in range(self.obs_frames)])
        self.god_future_traj = np.stack([self.init_position for _ in range(self.pred_frames)])

        self.log_dir = dir_check(args.log_dir)
        self.save_file_name = args.model_name + '_{}.npy'
        self.save_path = os.path.join(self.log_dir, self.save_file_name)
        self.train_info = self.get_train_and_test_agents()

    def get_train_and_test_agents(self):
        """
        Assemble train and test sample lists according to `args.train_type`
        ('one' = split a single dataset; 'all' = leave-one-out over datasets).

        return: dict with keys 'train_data', 'test_data', 'train_number'
        and 'sample_time' (number of augmentation passes over the data).
        """
        dir_check('./dataset_npz/')
        self.npy_file_base_path = './dataset_npz/{}/data.npz'

        if self.args.train_type == 'one':
            train_list = [self.args.test_set]
            test_list = [self.args.test_set]
        elif self.args.train_type == 'all':
            test_list = [self.args.test_set]
            train_list = [i for i in range(8) if not i == self.args.test_set]
            # train_list = [i for i in range(3) if not i == self.args.test_set] # toy exp

        data_managers_train = []
        for dataset in train_list:
            data_managers_train.append(self.get_agents_from_dataset(dataset))

        data_managers_test = []
        for dataset in test_list:
            data_managers_test.append(self.get_agents_from_dataset(dataset))

        sample_number_original = 0
        sample_time = 1
        for dm in data_managers_train:
            sample_number_original += dm.person_number

        if self.args.train_type == 'one':
            index = set([i for i in range(sample_number_original)])
            if USE_SEED:
                random.seed(SEED)
            # FIX: random.sample() rejects sets on Python >= 3.11. Sampling from
            # the sorted list is deterministic under the fixed seed and matches
            # the old behavior (set iteration order is ascending for these ints).
            train_index = random.sample(sorted(index), int(sample_number_original * self.args.train_percent))
            test_index = list(index - set(train_index))

            test_agents = self.sample_data(data_managers_train[0], test_index)
            train_agents = self.sample_data(data_managers_train[0], train_index)

            if self.args.reverse:
                train_agents += self.sample_data(data_managers_train[0], train_index, reverse=True, desc='Preparing reverse data')
                sample_time += 1

            if self.args.add_noise:
                for repeat in tqdm(range(self.args.add_noise), desc='Prepare noise data...'):
                    train_agents += self.sample_data(data_managers_train[0], train_index, add_noise=True, use_time_bar=False)
                    sample_time += 1

        elif self.args.train_type == 'all':
            train_agents = []
            trajmaps = []
            # Broadcast a single train_percent to every training dataset.
            if len(self.args.train_percent) == 1:
                train_percent = self.args.train_percent * np.ones([len(train_list)])
            else:
                train_percent = [self.args.train_percent[index] for index in train_list]

            for index, dm in enumerate(data_managers_train):
                agents, trajmap = self.sample_data(
                    dm,
                    person_index='auto',
                    random_sample=train_percent[index],
                    return_trajmap=True
                )
                train_agents += agents
                trajmaps.append(trajmap)

            if self.args.reverse:
                for index, dm in enumerate(data_managers_train):
                    train_agents += self.sample_data(
                        dm,
                        person_index='auto',
                        random_sample=train_percent[index],
                        reverse=True,
                        use_time_bar=False
                    )
                sample_time += 1

            if self.args.rotate:
                # One extra pass per rotation angle; reuse each dataset's
                # trajectory map computed above.
                for angel in tqdm(range(360//self.args.rotate, 360, 360//self.args.rotate), desc='Prepare rotate data...'):
                    sample_time += 1
                    for index, [dm, gm] in enumerate(zip(data_managers_train, trajmaps)):
                        train_agents += self.sample_data(
                            dm,
                            person_index='auto',
                            random_sample=train_percent[index],
                            rotate=angel,
                            use_time_bar=False,
                            given_trajmap=gm
                        )

            test_agents, test_trajmap = self.sample_data(
                data_managers_test[0],
                person_index='auto',
                return_trajmap=True,
                random_sample=False,
            )

        train_info = dict()
        train_info['train_data'] = train_agents
        train_info['test_data'] = test_agents
        train_info['train_number'] = len(train_agents)
        train_info['sample_time'] = sample_time
        return train_info

    def data_loader(self, dataset_index):
        """
        Read trajectory data from csv file.

        returns: `person_data`, `frame_data`
        """
        dataset_dir = [
            './data/eth/univ',
            './data/eth/hotel',
            './data/ucy/zara/zara01',
            './data/ucy/zara/zara02',
            './data/ucy/univ/students001',
            './data/ucy/zara/zara03',
            './data/ucy/univ/students003',
            './data/ucy/univ/uni_examples',
        ]
        # Column order of (x, y) in each csv; it differs between datasets.
        dataset_xy_order = [
            [3, 2],
            [2, 3],
            [3, 2],
            [3, 2],
            [2, 3],
            [3, 2],
            [2, 3],
            [2, 3],
        ]
        # dataset_dir = [ # toy exp
        #     './data/toy/half_circle',
        #     './data/toy/line_circle',
        #     './data/toy',
        # ]
        # dataset_xy_order = [ # toy exp
        #     [2, 3],
        #     [2, 3],
        #     [2, 3],
        # ]

        dataset_dir_current = dataset_dir[dataset_index]
        order = dataset_xy_order[dataset_index]

        csv_file_path = os.path.join(dataset_dir_current, 'true_pos_.csv')
        data = np.genfromtxt(csv_file_path, delimiter=',').T

        # Group rows by frame id: frame -> [person, x, y]
        frame_data = {}
        frame_list = set(data.T[0])
        for frame in frame_list:
            index_current = np.where(data.T[0] == frame)[0]
            frame_data[str(frame)] = np.column_stack([
                data[index_current, 1],
                data[index_current, order[0]],
                data[index_current, order[1]],
            ])

        # Group rows by person id: person -> [frame, x, y]
        person_data = {}
        person_list = set(data.T[1])
        for person in person_list:
            index_current = np.where(data.T[1] == person)[0]
            person_data[str(person)] = np.column_stack([
                data[index_current, 0],
                data[index_current, order[0]],
                data[index_current, order[1]],
            ])

        print('Load dataset from csv file done.')
        return person_data, frame_data

    def get_agents_from_dataset(self, dataset):
        """
        Compute social relations from the raw data and organize them into a
        `DatasetManager` holding per-person `Agent` instances.

        return: a `DatasetManager` for the given dataset index.
        """
        base_path = dir_check(os.path.join('./dataset_npz/', '{}'.format(dataset)))
        npy_path = self.npy_file_base_path.format(dataset)

        if os.path.exists(npy_path):
            # Read from the cached npz dataset file
            video_neighbor_list, video_matrix, frame_list = self.load_video_matrix(dataset)
        else:
            # Build a new npz dataset file from the raw csv
            person_data, frame_data = self.data_loader(dataset)
            video_neighbor_list, video_matrix, frame_list = self.create_video_matrix(
                person_data,
                frame_data,
                save_path=npy_path
            )

        if self.args.train_base == 'agent':
            data_manager = self.get_agents(video_neighbor_list, video_matrix, frame_list)

        print('\nPrepare agent data in dataset {} done.'.format(dataset))
        return data_manager

    def load_video_matrix(self, dataset):
        """
        Read the social matrix and social neighbor lists from a saved npz file.
        """
        print('Load data from "{}"...'.format(self.npy_file_base_path.format(dataset)))
        all_data = np.load(self.npy_file_base_path.format(dataset), allow_pickle=True)
        video_neighbor_list = all_data['video_neighbor_list']
        video_matrix = all_data['video_matrix']
        frame_list = all_data['frame_list']
        return video_neighbor_list, video_matrix, frame_list

    def create_video_matrix(self, person_data, frame_data, save_path='null'):
        """
        Compute the dense position matrix and per-frame social neighbors.

        `video_matrix`: shape = [frame_number, person_number, 2]
        """
        # FIX: np.str was removed in NumPy 1.24; it was an alias of the builtin
        # str, so astype(str) is behaviorally identical.
        person_list = np.sort(np.stack([float(person) for person in person_data])).astype(str)
        frame_list = np.sort(np.stack([float(frame) for frame in frame_data])).astype(str)

        person_number = len(person_list)
        frame_number = len(frame_list)

        # Fill with the "absent" sentinel, then write each agent's real track.
        video_matrix = self.args.init_position * np.ones([frame_number, person_number, 2])
        for person in person_data:
            person_index = np.where(person_list == person)[0][0]
            frame_list_current = (person_data[person]).T[0].astype(str)
            frame_index_current = np.reshape(np.stack([np.where(frame_current == frame_list) for frame_current in frame_list_current]), [-1])
            traj_current = person_data[person][:, 1:]
            video_matrix[frame_index_current, person_index, :] = traj_current

        # A "neighbor list" per frame = indices of everyone present that frame.
        video_neighbor_list = []
        for frame_index, data in enumerate(tqdm(video_matrix, desc='Calculate social matrix...')):
            person_appear = np.where(np.not_equal(data.T[0], self.args.init_position))[0]
            video_neighbor_list.append(person_appear)

        if not save_path == 'null':
            np.savez(
                save_path,
                video_neighbor_list=video_neighbor_list,
                video_matrix=video_matrix,
                frame_list=frame_list,
            )
        return video_neighbor_list, video_matrix, frame_list

    def sample_data(self, data_manager, person_index, add_noise=False, reverse=False, rotate=False, desc='Calculate agent data', use_time_bar=True, random_sample=False, sample_start=0.0, given_trajmap=False, return_trajmap=False):
        """
        Sample training data from data_manager.

        `random_sample`: a positive number in (0, 1) is the fraction of agents
        to pick at random; a negative number in (-1, 0) is the end point (as a
        fraction, in dataset time order) of a sequential sample, in which case
        `sample_start` (a positive number in 0~1) is the start point.

        return: a list of Agent_Part (plus the trajectory map when
        `return_trajmap` is True and no `given_trajmap` was supplied).
        """
        agents = []
        if person_index == 'auto':
            if random_sample > 0 and random_sample < 1:
                if USE_SEED:
                    random.seed(SEED)
                person_index = random.sample(
                    [i for i in range(data_manager.person_number)],
                    int(data_manager.person_number * random_sample),
                )
            elif random_sample == 0 or random_sample >= 1 or random_sample < -1:
                person_index = range(data_manager.person_number)
            elif random_sample < 0 and random_sample >= -1:
                person_index = [i for i in range(
                    (data_manager.person_number * np.abs(sample_start)).astype(int), # start index
                    (data_manager.person_number * np.abs(random_sample)).astype(int), # end index
                )]

        if use_time_bar:
            itera = tqdm(person_index, desc=desc)
        else:
            itera = person_index

        for person in itera:
            agent_current = data_manager.agent_data[person]
            start_frame = agent_current.start_frame
            end_frame = agent_current.end_frame

            # Slide a window of `total_frames` over the agent's lifetime.
            for frame_point in range(start_frame, end_frame, self.args.step):
                if frame_point + self.total_frames > end_frame:
                    break

                # type: Agent_Part
                sample_agent = data_manager.get_trajectory(
                    person,
                    frame_point,
                    frame_point+self.obs_frames,
                    frame_point+self.total_frames,
                    calculate_social=self.args.calculate_social,
                    normalization=self.args.normalization,
                    add_noise=add_noise,
                    reverse=reverse,
                    rotate=rotate,
                )
                agents.append(sample_agent)

        if not given_trajmap:
            traj_trajmap = TrajectoryMapManager(agents)
            for index in range(len(agents)):
                agents[index].write_traj_map(traj_trajmap)

            if return_trajmap:
                return agents, traj_trajmap
            else:
                return agents
        else:
            for index in range(len(agents)):
                agents[index].write_traj_map(given_trajmap)
            return agents

    def get_agents(self, video_neighbor_list, video_matrix, frame_list):
        """
        Wrap the social matrix into a `DatasetManager`, which builds one
        `Agent` per person (later sampled into `Agent_Part` training data).

        return: a `DatasetManager`.
        """
        data_manager = DatasetManager(
            video_neighbor_list, video_matrix, frame_list, self.args.init_position
        )
        return data_manager
class DatasetManager():
    """
    Holds every trajectory in one dataset and hands out training samples.
    """
    def __init__(self, video_neighbor_list, video_matrix, frame_list, init_position):
        self.video_neighbor_list = video_neighbor_list
        self.video_matrix = video_matrix
        self.frame_list = frame_list
        self.init_position = init_position
        self.agent_data = self.prepare_agent_data()

    def prepare_agent_data(self):
        """Build one Agent per person column of the video matrix."""
        self.frame_number, self.person_number, _ = self.video_matrix.shape
        return [
            Agent(
                person_id,
                self.video_neighbor_list,
                self.video_matrix,
                self.frame_list,
                self.init_position,
            )
            for person_id in range(self.person_number)
        ]

    def get_trajectory(self, agent_index, start_frame, obs_frame, end_frame, calculate_social=True, normalization=False, add_noise=False, reverse=False, rotate=False):
        """Build an Agent_Part sample for one person over [start_frame, end_frame)."""
        target_agent = self.agent_data[agent_index]
        # Neighbors are whoever appears at the last observed frame, excluding
        # the target agent itself.
        present = set(target_agent.video_neighbor_list[obs_frame - 1].tolist()) - set([agent_index])
        neighbor_agents = [self.agent_data[nei] for nei in present]
        return Agent_Part(
            target_agent, neighbor_agents, target_agent.frame_list, start_frame, obs_frame, end_frame, calculate_social=calculate_social, normalization=normalization, add_noise=add_noise, reverse=reverse, rotate=rotate
        )
class Agent():
    """One pedestrian's full trajectory within a dataset video."""

    def __init__(self, agent_index, video_neighbor_list, video_matrix, frame_list, init_position):
        self.agent_index = agent_index
        self.video_neighbor_list = video_neighbor_list
        self.frame_list = frame_list

        # Slice out this agent's (frame_number, 2) track.
        self.traj = video_matrix[:, agent_index, :]

        # Frames where x differs from the sentinel are frames the agent appears in.
        appear_frames = np.where(np.not_equal(self.traj.T[0], init_position))[0]
        self.start_frame = appear_frames[0]
        self.end_frame = appear_frames[-1] + 1  # exclusive end
class Agent_Part():
def __init__(self, target_agent, neighbor_agents, frame_list, start_frame, obs_frame, end_frame, calculate_social=True, normalization=False, add_noise=False, reverse=False, rotate=False):
# Trajectory info
self.start_frame = start_frame
self.obs_frame = obs_frame
self.end_frame = end_frame
self.obs_length = obs_frame - start_frame
self.total_frame = end_frame - start_frame
self.frame_list = frame_list[start_frame:end_frame]
self.vertual_agent = False
self.rotate = rotate
self.reverse = reverse
self.traj_map = 'null'
# Trajectory
self.traj = target_agent.traj[start_frame:end_frame]
if add_noise:
noise_curr = np.random.normal(0, 0.1, size=self.traj.shape)
self.traj += noise_curr
self.vertual_agent = True
elif reverse:
self.traj = self.traj[::-1]
self.vertual_agent = True
elif rotate: # rotate 为旋转角度
rotate_matrix_current = rotate_matrix[rotate, :, :]
self.traj_original = self.traj
self.traj = self.traj[0] + np.matmul(self.traj - self.traj[0], rotate_matrix_current)
self.vertual_agent = True
self.pred = 0
self.start_point = self.traj[0]
# Options
self.calculate_social = calculate_social
self.normalization = normalization
# Neighbor info
if not | |
values are real floats
matching shape of model and data. These are only used to find delays from
itegrations that are unflagged for at least two frequency bins. In this case,
the delays are assumed to have equal weight, otherwise the delays take zero weight.
refant : antenna number integer to use as a reference,
The antenna position coordaintes are centered at the reference, such that its phase
is identically zero across all frequencies. If None, use the first key in data as refant.
assume_2D : type=boolean, [default=False]
If this is true, all dimensions of antpos beyond the first two will be ignored.
If return_gains is False and assume_2D is False, then the returned variables will
look like Phi_0, Phi_1, Phi_2, etc. corresponding to the dimensions in antpos.
verbose : print output, type=boolean, [default=False]
tol : type=float, baseline match tolerance in units of baseline vectors (e.g. meters)
edge_cut : int, number of channels to exclude at each band edge in phase slope solver
time_avg : boolean, if True, replace resultant antenna phase slopes with the median across time
zero_pad : float factor by which to expand the grid onto which the data is binned. Only used
for ndim_fft mode. Must be >= 1.
return_gains : boolean. If True, convert result into a dictionary of gain waterfalls.
gain_ants : list of ant-pol tuples for return_gains dictionary
Output:
-------
if not return_gains:
fit : dictionary containing frequency-indpendent phase slope, e.g. Phi_ns_Jxx
for each position component and polarization in units of radians / [antpos].
If assume_2D is False, then these will be the more general Phi_0, Phi_1,
Phi_2, etc. corresponding to the dimensions in antpos.
else:
gains : dictionary with gain_ants as keys and gain waterfall arrays as values
"""
# check solver and edgecut
assert solver in PHASE_SLOPE_SOLVERS, f"Unrecognized solver {solver}"
echo(f"...configuring global_phase_slope_logcal for the {solver} algorithm", verbose=verbose)
assert 2 * edge_cut < list(data.values())[0].shape[1] - 1, "edge_cut cannot be >= Nfreqs/2 - 1"
# get keys from model and data dictionaries
keys = sorted(set(model.keys()) & set(data.keys()))
antnums = np.unique(list(antpos.keys()))
# make weights if None and make flags
if wgts is None:
wgts = odict()
for i, k in enumerate(keys):
wgts[k] = np.ones_like(data[k], dtype=np.float)
flags = DataContainer({k: ~wgts[k].astype(np.bool) for k in wgts})
# center antenna positions about the reference antenna
if refant is None:
refant = keys[0][0]
assert refant in antnums, "reference antenna {} not found in antenna list".format(refant)
antpos = odict(list(map(lambda k: (k, antpos[k] - antpos[refant]), antpos.keys())))
# count dimensions of antenna positions, figure out how many to solve for
nDims = _count_nDims(antpos, assume_2D=assume_2D)
# average data over baselines
if reds is None:
reds = redcal.get_pos_reds(antpos, bl_error_tol=tol)
ap = data.antpairs()
reds_here = []
for red in reds:
red_here = [bl[0:2] for bl in red if bl[0:2] in ap or bl[0:2][::-1] in ap] # if the reds have polarizations, ignore them
if len(red_here) > 0:
reds_here.append(red_here)
avg_data, avg_flags, _ = utils.red_average(data, reds=reds_here, flags=flags, inplace=False)
red_keys = list(avg_data.keys())
avg_wgts = DataContainer({k: (~avg_flags[k]).astype(np.float) for k in avg_flags})
avg_model, _, _ = utils.red_average(model, reds=reds_here, flags=flags, inplace=False)
ls_data, ls_wgts, bl_vecs, pols = {}, {}, {}, {}
for rk in red_keys:
# build equation string
eqn_str = ''
ap0, ap1 = split_pol(rk[2])
for d in range(nDims):
if len(eqn_str) > 0:
eqn_str += ' + '
eqn_str += f'{antpos[rk[0]][d]}*Phi_{d}_{ap0} - {antpos[rk[1]][d]}*Phi_{d}_{ap1}'
bl_vecs[eqn_str] = antpos[rk[0]] - antpos[rk[1]]
pols[eqn_str] = rk[2]
# calculate median of unflagged angle(data/model)
# ls_weights are sum of non-binary weights
dm_ratio = avg_data[rk] / avg_model[rk]
dm_ratio /= np.abs(dm_ratio) # This gives all channels roughly equal weight, moderating the effect of RFI (as in firstcal)
binary_flgs = np.isclose(avg_wgts[rk], 0.0) | np.isinf(dm_ratio) | np.isnan(dm_ratio)
avg_wgts[rk][binary_flgs] = 0.0
dm_ratio[binary_flgs] *= np.nan
if solver == 'linfit': # we want to fit the angles
ls_data[eqn_str] = np.nanmedian(np.angle(dm_ratio[:, edge_cut:(dm_ratio.shape[1] - edge_cut)]), axis=1, keepdims=True)
elif solver in ['dft', 'ndim_fft']: # we want the full complex number
ls_data[eqn_str] = np.nanmedian(dm_ratio[:, edge_cut:(dm_ratio.shape[1] - edge_cut)], axis=1, keepdims=True)
ls_wgts[eqn_str] = np.sum(avg_wgts[rk][:, edge_cut:(dm_ratio.shape[1] - edge_cut)], axis=1, keepdims=True)
# set unobserved data to 0 with 0 weight
ls_wgts[eqn_str][~np.isfinite(ls_data[eqn_str])] = 0
ls_data[eqn_str][~np.isfinite(ls_data[eqn_str])] = 0
if solver == 'linfit': # build linear system for phase slopes and solve with linsolve
# setup linsolve and run
solver = linsolve.LinearSolver(ls_data, wgts=ls_wgts)
echo("...running linsolve", verbose=verbose)
fit = solver.solve()
echo("...finished linsolve", verbose=verbose)
elif solver in ['dft', 'ndim_fft']: # look for a peak angle slope by FTing across the array
if not np.all([split_pol(pol)[0] == split_pol(pol)[1] for pol in data.pols()]):
raise NotImplementedError('DFT/FFT solving of global phase not implemented for abscal with cross-polarizations.')
for k in ls_data:
ls_data[k][ls_wgts[k] == 0] = np.nan
# solve one polarization at a time
fit = {}
for pol in data.pols():
eqkeys = [k for k in bl_vecs.keys() if pols[k] == pol]
# reformat data into arrays for dft_phase_slope_solver
if solver == 'dft':
assert assume_2D, 'dft solver only works when the array is 2D. Try using ndim_fft instead.'
blx = np.array([bl_vecs[k][0] for k in eqkeys])
bly = np.array([bl_vecs[k][1] for k in eqkeys])
data_array = np.array([ls_data[k] for k in eqkeys])
slope_x, slope_y = dft_phase_slope_solver(blx, bly, data_array)
fit['Phi_0_{}'.format(split_pol(pol)[0])] = slope_x
fit['Phi_1_{}'.format(split_pol(pol)[0])] = slope_y
# Perform ndim_fft solver
elif solver == 'ndim_fft':
slopes = ndim_fft_phase_slope_solver({k: ls_data[k] for k in eqkeys}, {k: bl_vecs[k] for k in eqkeys},
assume_2D=assume_2D, zero_pad=zero_pad, bl_error_tol=tol)
for d, slope in enumerate(slopes):
fit[f'Phi_{d}_{split_pol(pol)[0]}'] = slope
# time average
if time_avg:
Ntimes = list(fit.values())[0].shape[0]
for k in fit:
fit[k] = np.repeat(np.moveaxis(np.median(fit[k], axis=0)[np.newaxis], 0, 0), Ntimes, axis=0)
if not return_gains:
# rename variables ew/ns instead of 0/1 to maintain backwards compatability
if assume_2D:
params = list(fit.keys())
for p in params:
if 'Phi_0' in p:
fit[p.replace('Phi_0', 'Phi_ew')] = fit[p]
del fit[p]
if 'Phi_1' in p:
fit[p.replace('Phi_1', 'Phi_ns')] = fit[p]
del fit[p]
return fit
else:
# compute gains, dotting each slope into the corresponding coordinate in that dimension
gains = {}
for ant in gain_ants:
Phis = [fit[f'Phi_{d}_{ant[1]}'] for d in range((nDims, 2)[assume_2D])]
gains[ant] = np.exp(1.0j * np.einsum('i,ijk,k->jk', antpos[ant[0]][0:len(Phis)],
Phis, np.ones(data[keys[0]].shape[1])))
return gains
def merge_gains(gains, merge_shared=True):
    """
    Merge a list of gain (or flag) dictionaries.

    If the first dictionary holds boolean ndarrays, the inputs are treated as
    flags and combined with a logical OR (elementwise add on boolean arrays);
    otherwise entries are multiplied together as complex gains.

    Parameters:
    -----------
    gains : type=list or tuple, series of gain dictionaries with (ant, pol) keys
        and complex ndarrays as values (or boolean ndarrays if flags)

    merge_shared : type=bool, If True merge only shared keys, eliminating the others.
        Otherwise, merge all keys.

    Output:
    -------
    merged_gains : type=dictionary, merged gain (or flag) dictionary with same key-value
        structure as input dict.
    """
    # Decide which keys survive: intersection of all dicts, or their union.
    if merge_shared:
        key_sets = [set(g.keys()) for g in gains]
        keys = sorted(set(reduce(operator.and_, key_sets)))
    else:
        key_lists = [list(g.keys()) for g in gains]
        keys = sorted(set(reduce(operator.add, key_lists)))

    # Detect flags from the dtype of the first value of the first dictionary.
    first_value = gains[0][list(gains[0].keys())[0]]
    fedflags = first_value.dtype == np.bool_

    merged_gains = odict()
    for k in keys:
        if fedflags:
            # OR of flags; a missing key defaults to fully flagged (True).
            merged_gains[k] = reduce(operator.add, [g.get(k, True) for g in gains])
        else:
            # Product of gains; a missing key defaults to unity gain.
            merged_gains[k] = reduce(operator.mul, [g.get(k, 1.0) for g in gains])
    return merged_gains
def data_key_to_array_axis(data, key_index, array_index=-1, avg_dict=None):
"""
move an index of data.keys() into the data axes
Parameters:
-----------
data : type=DataContainer, complex visibility data with
antenna-pair + pol tuples for keys, in DataContainer dictionary format.
key_index : integer, index of keys to consolidate into data arrays
array_index : integer, which axes of data arrays to append to
avg_dict : DataContainer, a dictionary with same keys as data
that will have its data arrays averaged along key_index
Result:
-------
new_data : DataContainer, complex visibility data
with key_index of keys moved into the data arrays
new_avg_dict : copy of avg_dict. Only returned if avg_dict is not None.
popped_keys : unique list of keys moved into data array axis
"""
# instantiate new data object
new_data = odict()
new_avg = odict()
# get keys
keys = list(data.keys())
# sort keys across key_index
key_sort = np.argsort(np.array(keys, dtype=np.object)[:, key_index])
keys = list(map(lambda i: keys[i], key_sort))
| |
at least one null byte
nameBytes.extend( (0x20 - len(nameBytes)) * b'\00' )
nameChecksOut = True
else:
msg( 'Unable to encode the new name into 31 bytes. Try shortening the name.' )
except:
msg( 'Unable to encode the new name into 31 bytes. There may be an invalid character.' )
if not newName: # User canceled above name input window
return
# Write the new name's bytes into both CSS files at the appropriate location
cssData1 = getFileDataFromDiscTreeAsBytes( iid=cssData1Iid )
nameBytesLength = len( nameBytes )
cssData0[nameOffset:nameOffset+nameBytesLength] = nameBytes
cssData1[nameOffset:nameOffset+nameBytesLength] = nameBytes
# Save the new CSS file data to disc
_, _, isoOffset, fileSize, isoPath, _, _ = Gui.isoFileTree.item( cssData0Iid, 'values' )
Gui.isoFileTree.item( cssData0Iid, values=('Stage name updated', 'file', isoOffset, fileSize, isoPath, 'ram', hexlify(cssData0)), tags='changed' )
_, _, isoOffset, fileSize, isoPath, _, _ = Gui.isoFileTree.item( cssData1Iid, 'values' )
Gui.isoFileTree.item( cssData1Iid, values=('Stage name updated', 'file', isoOffset, fileSize, isoPath, 'ram', hexlify(cssData1)), tags='changed' )
# Update the name shown for the stage in question
Gui.isoFileTree.item( self.iidSelectionsTuple[0], values=(' '+newName,)+iidValues[1:] ) # Extra spaces added to indent the name from the stage folder name
# Remember these changes, and update the program status
unsavedDiscChanges.append( 'Random Neutral stage name updated.' )
updateProgramStatus( 'Stage Name Updated' )
class textureMenuOptions( Tk.Menu, object ):
    def __init__( self, parent, tearoff=True, *args, **kwargs ):
        """ Right-click context menu for textures in the DAT Texture Tree tab. """
        super( textureMenuOptions, self ).__init__( parent, tearoff=tearoff, *args, **kwargs )
        self.open = False # Whether the menu is currently posted/visible
    def repopulate( self ):
        """ This method will be called every time the submenu is displayed.
            Rebuilds the menu entries based on how many textures are currently
            selected in the DAT Texture Tree (none / one / multiple). The
            trailing '# X' comments note each entry's keyboard accelerator. """

        # Clear all current population
        self.delete( 0, 'last' )
        self.lastItem = ''

        # Check if anything is currently selected
        self.iids = Gui.datTextureTree.selection() # Returns a tuple of iids, or an empty string if nothing is selected.
        self.selectionCount = len( self.iids )

        if self.iids: # Keyboard shortcuts:
            self.lastItem = self.iids[-1] # Selects the lowest position item selected in the treeview.
            self.add_command( label='Export Selected Texture(s)', underline=0, command=exportTextures ) # E
            self.add_command( label='Export All', underline=7, command=self.exportAllTextures ) # A
            self.add_command( label='Import Texture(s)', underline=0, command=importImageFiles ) # I
            self.add_separator()
            self.add_command( label='Blank Texture (Zero-out)', underline=0, command=blankTextures ) # B
            #self.add_command(label='Disable (Prevents Rendering)', underline=0, command=disableTextures )

            # Offer singular or plural clipboard/analysis actions depending on selection size
            if self.selectionCount > 1:
                self.add_command( label='Copy Offsets to Clipboard', underline=0, command=self.textureOffsetToClipboard ) # C
                self.add_command( label='Copy Dolphin Hashes to Clipboard', underline=13, command=self.dolphinHashToClipboard ) # H
            else:
                self.add_command( label='Show in Structural Analysis', underline=0, command=self.showTextureInStructAnalysisTab ) # S
                self.add_command( label='Copy Offset to Clipboard', underline=0, command=self.textureOffsetToClipboard ) # C
                self.add_command( label='Copy Dolphin Hash to Clipboard', underline=13, command=self.dolphinHashToClipboard ) # H
        else:
            self.add_command( label='Export All', underline=7, command=self.exportAllTextures ) # A
def exportAllTextures( self ):
if len( Gui.datTextureTree.get_children() ) == 0:
msg( 'You need to first open a file that you would like to export textures from.'
'\n\n(If you have loaded a file, either there were no textures found, or '
'you have texture filters blocking your results.)' )
else:
exportTextures( exportAll=True )
def showTextureInStructAnalysisTab( self ):
# Set the selected item in DAT Texture Tree, so that it's clear which image is being operated on
Gui.datTextureTree.selection_set( self.lastItem )
Gui.datTextureTree.focus( self.lastItem )
# Make sure the current iid is the start of a structure (may not be in the case of particle effects)
structOffset = int( self.lastItem )
if not self.lastItem in globalDatFile.structureOffsets:
structOffset = globalDatFile.getPointerOwner( structOffset, True )
# Add the texture's data block instances to the tree and show them
showStructInStructuralAnalysis( structOffset )
# Switch to the SA tab
Gui.mainTabFrame.select( Gui.savTab )
def textureOffsetToClipboard( self ):
Gui.datTextureTree.selection_set( self.iids ) # Highlights the item(s)
Gui.datTextureTree.focus( self.iids[0] ) # Sets keyboard focus to the first item
# Get the offsets of all of the items selected
offsets = []
for iid in self.iids:
imageDataDetails = Gui.datTextureTree.item( iid, 'values' )[0]
offsets.append( imageDataDetails.split()[0] )
copyToClipboard( ', '.join(offsets) )
def dolphinHashToClipboard( self ):
Gui.datTextureTree.selection_set( self.iids ) # Highlights the item(s)
Gui.datTextureTree.focus( self.iids[0] ) # Sets keyboard focus to the first item
# Get the hashes of all of the items selected
hashedFileNames = []
for iid in self.iids:
hashedFileNames.append( constructTextureFilename( globalDatFile, iid, forceDolphinHash=True ) )
copyToClipboard( ', '.join(hashedFileNames) )
class structureMenuOptions( Tk.Menu, object ):

	""" Right-click context menu for the Structural Analysis tab's structure
		tree. Rebuilt on every display (via repopulate) so its options always
		reflect the current treeview selection. """

	def __init__( self, parent, tearoff=True, *args, **kwargs ):
		super( structureMenuOptions, self ).__init__( parent, tearoff=tearoff, *args, **kwargs )
		self.open = False # Tracks whether this menu is currently posted
	def repopulate( self ):
		""" This method will be called every time the submenu is displayed.
			Builds menu entries appropriate for the selected structure(s). """
		# Clear all current population
		self.delete( 0, 'last' )
		# Determine the kind of structure(s) we're working with, to determine menu options
		self.iids = Gui.fileStructureTree.selection()
		self.selectionCount = len( self.iids )
		if self.selectionCount == 1: # Keyboard shortcuts:
			itemName = Gui.fileStructureTree.item( self.iids[0], 'text' )
			if itemName == 'coll_data':
				#collDataOffset = int( self.iids[0].split('/')[-1] )
				self.add_command( label='Render', underline=0, command=self.renderCollisions )
			self.add_command( label='Copy Offset to Clipboard', underline=0, command=self.offsetToClipboard ) # C
			# Check the kind of structure clicked on; the struct offset is the
			# last component of the treeview iid path
			structOffset = int( self.iids[0].split('/')[-1] )
			structure = globalDatFile.getStruct( structOffset )
			if structure.__class__ in ( hsdStructures.ImageObjDesc, hsdStructures.TextureObjDesc, hsdStructures.ImageDataBlock ):
				self.add_command( label='Show in DAT Texture Tree', underline=0, command=self.showInDatTextureTree ) # S
			# Check if the currently selected item is 'marked'
			currentTags = Gui.fileStructureTree.item( self.iids[0], 'tags' )
			if 'marked' in currentTags:
				self.add_command( label='Unmark Selected Struct', underline=0, command=self.unmarkSelectedStructs ) # U
			else:
				self.add_command( label='Mark Selected Struct', underline=0, command=self.markSelectedStructs ) # M
			self.add_separator()
		elif self.selectionCount > 1:
			self.add_command( label='Copy Offsets to Clipboard', underline=0, command=self.offsetToClipboard ) # C
			# Check if there are more marked or unmarked items selected, and
			# offer the action that applies to the majority
			markedItems = 0
			unmarkedItems = 0
			for iid in self.iids:
				if 'marked' in Gui.fileStructureTree.item( iid, 'tags' ): markedItems += 1
				else: unmarkedItems += 1
			if markedItems >= unmarkedItems:
				self.add_command( label='Unmark Selected Structs', underline=0, command=self.unmarkSelectedStructs ) # U
			else:
				self.add_command( label='Mark Selected Structs', underline=0, command=self.markSelectedStructs ) # M
			self.add_separator()
		# These two options are always available, regardless of selection
		self.add_command( label='Collapse Data Space', underline=1, command=self.collapseDataSpace ) # O
		self.add_command( label='Extend Data Space', underline=0, command=self.extendDataSpace ) # E
	def offsetToClipboard( self ):
		""" Copies the absolute file offset(s) of the selected structure(s) to
			the clipboard, as a comma-separated list of hex values. """
		Gui.fileStructureTree.selection_set( self.iids ) # Highlights the item(s)
		Gui.fileStructureTree.focus( self.iids[0] ) # Sets keyboard focus to the first item
		# Get the offsets of all of the items selected
		offsets = []
		for iid in self.iids:
			offset = int( iid.split('/')[-1] )
			# +0x20 converts the data-section-relative offset to a file offset
			offsets.append( uHex(0x20+offset) )
		copyToClipboard( ', '.join(offsets) )
	def showInDatTextureTree( self ):
		""" Locates the image data for the selected structure and selects it
			in the DAT Texture Tree tab (re-scanning the file if needed). """
		# Check the kind of structure clicked on
		structOffset = int( self.iids[0].split('/')[-1] )
		structure = globalDatFile.getStruct( structOffset )
		# Get the image data offset (whether from the TObj or another lower structure)
		if structure.__class__ == hsdStructures.TextureObjDesc:
			imageHeaderOffset = structure.getValues( 'Image_Header_Pointer' )
			imageHeader = globalDatFile.getStruct( imageHeaderOffset )
			imageDataOffset = imageHeader.getValues()[0]
		elif structure.__class__ == hsdStructures.ImageObjDesc:
			imageDataOffset = structure.getValues()[0]
		else: # Should be an ImageDataBlock
			imageDataOffset = structure.offset
		targetIid = str( imageDataOffset ) # DAT Texture Tree iids are the decimal image data offset
		# Make sure the DAT Texture Tree tab has been populated
		if not Gui.datTextureTree.get_children() or not Gui.datTextureTree.exists( targetIid ):
			clearDatTab()
			scanDat( priorityTargets=(imageDataOffset,) )
		# Look for this texture in the DAT Texture Tree tab
		if Gui.datTextureTree.exists( targetIid ):
			# Switch tabs, and select the target texture
			Gui.mainTabFrame.select( Gui.datTab )
			Gui.datTextureTree.selection_set( targetIid )
			Gui.datTextureTree.see( targetIid )
		else: # ¿Qué?
			print 'Unable to find {} (0x{:X}) in the DAT Texture Tree tab.'.format( targetIid, 0x20+int(targetIid) )
			msg( 'The image for ' + structure.name + ' could not\nbe found in the DAT Texture Tree tab!', '¿Qué?' )
	def markSelectedStructs( self ):
		""" Adds the 'marked' tag to each selected treeview item. """
		# Add tags to the selected items
		for iid in self.iids:
			currentTags = Gui.fileStructureTree.item( iid, 'tags' )
			if not currentTags:
				Gui.fileStructureTree.item( iid, tags='marked' )
			elif 'marked' not in currentTags:
				# NOTE(review): ttk.Treeview.item('tags') commonly returns a tuple,
				# which has no .append — confirm this wrapper returns a list
				currentTags.append( 'marked' )
				Gui.fileStructureTree.item( iid, tags=currentTags )
	def unmarkSelectedStructs( self ):
		""" Removes the 'marked' tag from each selected treeview item. """
		for iid in self.iids:
			try:
				currentTags = list( Gui.fileStructureTree.item( iid, 'tags' ) )
				currentTags.remove( 'marked' )
				Gui.fileStructureTree.item( iid, tags=currentTags )
			except Exception as e:
				# An unmarked item raises ValueError on remove; treated as best-effort
				print "Unable to remove 'marked' selection status from", iid
				print e
	def collapseDataSpace( self ):
		""" Prompts the user for an offset/amount, then removes that span of
			data from the file and re-analyzes its structures. """
		modifierWindow = DataSpaceModifierWindow( Gui.root, 'collapse' )
		if modifierWindow.offset and modifierWindow.amount:
			# Perform some basic validation and typcasting
			try:
				offset = int( modifierWindow.offset, 16 ) - 0x20 # User enters a file offset; convert to data-section offset
				amount = int( modifierWindow.amount, 16 )
			except Exception as err:
				print err
				msg( 'Invalid input values.' )
				return
			globalDatFile.collapseDataSpace( offset, amount )
			# Need to reinitialize file structures
			clearStructuralAnalysisTab()
			analyzeDatStructure()
			updateProgramStatus( 'File Data Collapsed' )
	def extendDataSpace( self ):
		""" Prompts the user for an offset/amount, then inserts that much
			empty space into the file and re-analyzes its structures. """
		modifierWindow = DataSpaceModifierWindow( Gui.root, 'extend' )
		if modifierWindow.offset and modifierWindow.amount:
			# Perform some basic validation and typcasting
			try:
				offset = int( modifierWindow.offset, 16 ) - 0x20 # User enters a file offset; convert to data-section offset
				amount = int( modifierWindow.amount, 16 )
			except Exception as err:
				print err
				msg( 'Invalid input values.' )
				return
			globalDatFile.extendDataSpace( offset, amount )
			# Need to reinitialize file structures
			clearStructuralAnalysisTab()
			analyzeDatStructure()
			updateProgramStatus( 'File Data Extended' )
	def renderCollisions( self ):
		""" Opens the Collisions Editor window for the selected coll_data struct. """
		CollisionsEditor( int(self.iids[0].split('/')[-1]) )
class CollisionsEditor( basicWindow ):
def __init__( self, collStructOffset ):
basicWindow.__init__( self, Gui.root, 'Collision Data for ' + globalDatFile.fileName, offsets=(0, 30), topMost=False, resizable=True, minsize=(600, 350) )
self.highlightedLabels = []
self.highlightedId = None
# Get the structures defining the stage's spot, links, and areas (they should already be initialized)
self.collStruct = globalDatFile.structs[ collStructOffset ]
spotTableOffset, linkTableOffset, areaTableOffset = self.collStruct.getChildren()
self.spotTable = globalDatFile.structs[ spotTableOffset ]
self.linkTable = globalDatFile.structs[ linkTableOffset ]
self.areaTable = globalDatFile.structs[ areaTableOffset ]
self.vertices = self.spotTable.getVertices()
self.collisionLinks = self.linkTable.getFaces()
self.areas = self.areaTable.getAreas()
self.showAreas = Tk.BooleanVar( value=False )
self.showBasicLinks = Tk.BooleanVar( value=True )
# self.showPreLinks = Tk.BooleanVar( value=False )
# self.showPostLinks = Tk.BooleanVar( value=False )
# Get reference counts for spots, and set render status
spotRefCounts = {}
for link in self.collisionLinks:
for index in link.allSpotIndices:
if index == -1: continue
elif index in spotRefCounts:
spotRefCounts[index] += 1
else:
spotRefCounts[index] = 1
# if link.type == 'pre': link.render = self.showPreLinks.get()
# elif link.type == 'post': link.render = self.showPostLinks.get()
# else:
link.render = self.showBasicLinks.get() # Basic links
# Convert the 2D collision lines to 3D collision surfaces
self.extrudeCollisionLinks()
# Create vertices from the areas, and add | |
in the property
expectedResult = None
if propType == "String":
expectedResult = stringArray[2]
elif propType == "Integer":
expectedResult = int(stringArray[2])
elif propType == "Decimal":
expectedResult = decimal.Decimal(stringArray[2])
else:
expectedResult = False
if str.lower(stringArray[2]) == 'true':
expectedResult = True
#now compare getter to the reformatted stringArray[2] and see if we have successfully altered the property
if getter == expectedResult:
Graph.api.revertEntityPropertyValues(entityID, False)
getter = Graph.api.getEntityPropertyValue(entityID, stringArray[1])
if getter == baseValue:
testResult = True
except Exceptions.ScriptError:
#Some test cases violate restriction constraints and will raise an exception.
# This works as intended
testResult = False
except Exception as e:
errorMsg = ('Error! Traceback = %s' % (e) )
errata.append(errorMsg)
testcase = str(stringArray[0])
allTrueResult = str(testResult)
expectedResult = "True"
results = [n, testcase, allTrueResult, expectedResult, errata]
resultSet.append(results)
Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
return resultSet
def testEntityPhase4_1():
    ''' A repeat of testEntityPhase3, but using the Python script interface
    instead of going directly against Graph.api.

    Each line of Entity_Phase4.atest holds a whitespace-separated test case:
    meme path, property name, new value.  For every case: create the entity,
    set the property, verify the typed value round-trips, then revert and
    verify the original value is restored.

    Returns a list of [n, testcase, actualResult, expectedResult, errata] rows.
    '''
    method = moduleName + '.' + 'testEntityPhase4.1'
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
    resultSet = []
    testFileName = os.path.join(testDirPath, "Entity_Phase4.atest")
    # Bug fix: the original referenced 'readLoc.close' without calling it, so the
    # file handle was never closed.  A context manager guarantees closure.
    with codecs.open(testFileName, "r", "utf-8") as readLoc:
        allLines = readLoc.readlines()
    n = 0
    for eachReadLine in allLines:
        errata = []
        n = n+1
        stringArray = str.split(eachReadLine)
        Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
        testResult = False
        try:
            entityID = api.createEntityFromMeme(stringArray[0])
            baseValue = api.getEntityPropertyValue(entityID, stringArray[1])
            api.setEntityPropertyValue(entityID, stringArray[1], stringArray[2])
            getter = api.getEntityPropertyValue(entityID, stringArray[1])
            propType = api.getEntityPropertyType(entityID, stringArray[1])
            # Reformat the expected result from a unicode string to the
            # property's declared type, so it can be compared with the getter
            if propType == "String":
                expectedResult = stringArray[2]
            elif propType == "Integer":
                expectedResult = int(stringArray[2])
            elif propType == "Decimal":
                expectedResult = decimal.Decimal(stringArray[2])
            else:
                # Booleans (and anything else): only the literal 'true' is True
                expectedResult = str.lower(stringArray[2]) == 'true'
            # The test passes only if the set value round-trips AND the revert
            # restores the original value
            if getter == expectedResult:
                api.revertEntityPropertyValues(entityID, False)
                getter = api.getEntityPropertyValue(entityID, stringArray[1])
                if getter == baseValue:
                    testResult = True
        except Exceptions.ScriptError:
            # Some test cases violate restriction constraints and will raise an
            # exception.  This works as intended.
            testResult = False
        except Exception as e:
            errorMsg = ('Error! Traceback = %s' % (e) )
            errata.append(errorMsg)
        testcase = str(stringArray[0])
        resultSet.append([n, testcase, str(testResult), "True", errata])
        Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
    return resultSet
def testRevertEntity():
    ''' A repeat of the testEntityPhase4 tests, but using revertEntity().

    Runs three passes over the cases in Entity_Phase4.atest (each line is a
    whitespace-separated: meme path, property name, new value):
      1. Set a property, then verify revertEntity() restores its original value.
      2. Add a custom integer property, then verify revertEntity() removes it.
      3. Combine the two: set a property AND add a custom one, then verify
         both effects are undone.

    Returns a list of [n, testcase, actualResult, expectedResult, errata] rows.
    '''
    method = moduleName + '.' + 'testRevertEntity'
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
    resultSet = []
    testFileName = os.path.join(testDirPath, "Entity_Phase4.atest")
    # Bug fix: the original referenced 'readLoc.close' without calling it, so the
    # file handle was never closed.  A context manager guarantees closure.
    with codecs.open(testFileName, "r", "utf-8") as readLoc:
        allLines = readLoc.readlines()

    def _coerceExpected(propType, rawValue):
        # Convert the raw (unicode) test value into the property's declared
        # type, so it can be compared with getEntityPropertyValue() results.
        if propType == "String":
            return rawValue
        elif propType == "Integer":
            return int(rawValue)
        elif propType == "Decimal":
            return decimal.Decimal(rawValue)
        else:
            # Booleans (and anything else): only the literal 'true' is True
            return str.lower(rawValue) == 'true'

    n = 0
    # Pass 1: re-run the phase-4 tests, reverting with revertEntity()
    for eachReadLine in allLines:
        errata = []
        n = n+1
        stringArray = str.split(eachReadLine)
        Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
        testResult = True
        try:
            entityID = api.createEntityFromMeme(stringArray[0])
            baseValue = api.getEntityPropertyValue(entityID, stringArray[1])
            api.setEntityPropertyValue(entityID, stringArray[1], stringArray[2])
            getter = api.getEntityPropertyValue(entityID, stringArray[1])
            propType = api.getEntityPropertyType(entityID, stringArray[1])
            expectedResult = _coerceExpected(propType, stringArray[2])
            # Only when the set round-tripped do we exercise the revert itself
            if getter == expectedResult:
                api.revertEntity(entityID, False)
                getter = api.getEntityPropertyValue(entityID, stringArray[1])
                if getter != baseValue:
                    testResult = False
        except Exceptions.ScriptError:
            # Some test cases violate restriction constraints and will raise an
            # exception.  This works as intended.
            testResult = False
        except Exception as e:
            # NOTE(review): testResult stays True on a generic failure here
            # (only errata records it) — preserved from the original; confirm intent.
            errata.append('Error! Traceback = %s' % (e))
        resultSet.append([n, str(stringArray[0]), str(testResult), "True", errata])
        Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])

    # Pass 2: add a throwaway custom property, then check revertEntity() removes it
    for eachReadLine in allLines:
        errata = []
        n = n+1
        stringArray = str.split(eachReadLine)
        Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
        testResult = True
        try:
            entityID = api.createEntityFromMeme(stringArray[0])
            # Create a property named after the current n count and give it the n value
            currValue = "%s" %n
            Graph.api.addEntityIntegerProperty(entityID, currValue, currValue)
            getter = Graph.api.getEntityHasProperty(entityID, currValue)
            if getter != True:
                testResult = False
            # NOTE(review): the second argument here is the property name, while
            # other revertEntity() calls pass a boolean — confirm the signature.
            Graph.api.revertEntity(entityID, currValue)
            getter = Graph.api.getEntityHasProperty(entityID, currValue)
            if getter == True:
                testResult = False
        except Exceptions.ScriptError:
            # Expected for test cases that violate restriction constraints
            testResult = False
        except Exception as e:
            errata.append('Error! Traceback = %s' % (e))
        resultSet.append([n, str(stringArray[0]), str(testResult), "True", errata])
        Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])

    # Pass 3: set a property and add a custom one, then check revertEntity() undoes both
    for eachReadLine in allLines:
        errata = []
        n = n+1
        stringArray = str.split(eachReadLine)
        Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
        testResult = True
        try:
            entityID = api.createEntityFromMeme(stringArray[0])
            baseValue = api.getEntityPropertyValue(entityID, stringArray[1])
            api.setEntityPropertyValue(entityID, stringArray[1], stringArray[2])
            getter = api.getEntityPropertyValue(entityID, stringArray[1])
            propType = api.getEntityPropertyType(entityID, stringArray[1])
            expectedResult = _coerceExpected(propType, stringArray[2])
            # Create a property named after the current n count and give it the n value
            currValue = "%s" %n
            Graph.api.addEntityIntegerProperty(entityID, currValue, currValue)
            getter = Graph.api.getEntityHasProperty(entityID, currValue)
            if getter != True:
                testResult = False
            # NOTE(review): at this point 'getter' holds the has-property boolean,
            # not the property value, so this comparison only succeeds for boolean
            # expected values — possibly a latent bug in the original; preserved as-is.
            if getter == expectedResult:
                api.revertEntity(entityID, False)
                getter = api.getEntityPropertyValue(entityID, stringArray[1])
                if getter != baseValue:
                    testResult = False
            # Make sure the custom property is gone
            Graph.api.revertEntity(entityID, currValue)
            getter = Graph.api.getEntityHasProperty(entityID, currValue)
            if getter == True:
                testResult = False
        except Exceptions.ScriptError:
            # Expected for test cases that violate restriction constraints
            testResult = False
        except Exception as e:
            errata.append('Error! Traceback = %s' % (e))
        resultSet.append([n, str(stringArray[0]), str(testResult), "True", errata])
        Graph.logQ.put( [logType , logLevel.INFO , method , "Finished testcase %s" %(n)])
    Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
    return resultSet
def testEntityPhase6():
''' Check and see if the meme is a singleton
Tests getMemeIsSingleton
Tests getEntityFromMeme in singleton context
Strategy -
If the meme is a singleton, then it should have had an entity created already
1 - Is the meme a singleton?
2a - If not, then entity.uuid should be non-existent
2b - If so, then entity.uuid should have a UUID
3b - create an entiity
4b - is the UUID the same as before? It should be
'''
method = moduleName + '.' + 'testEntityPhase6'
Graph.logQ.put( [logType , logLevel.DEBUG , method , "entering"])
results = []
resultSet = []
testFileName = os.path.join(testDirPath, "Entity_Phase6.atest")
readLoc = codecs.open(testFileName, "r", "utf-8")
allLines = readLoc.readlines()
readLoc.close
n = 0
for eachReadLine in allLines:
errata = []
n = n+1
stringArray = str.split(eachReadLine)
Graph.logQ.put( [logType , logLevel.DEBUG , method , "Starting testcase %s, meme %s" %(n, stringArray[0])])
expectedTestResult = False
if | |
stock level
first_and_last_days = []
for i, y in enumerate(sorted(self.three_by_year.keys())):
if (i == 0 and (len(self.stocklevels) > 0)):
# on the first loop, use the earliest stocklevel
# instead of january 1 of that year
if self.stocklevels[0]['date'].year in self.three_by_year.keys():
first_and_last_days.append(self.stocklevels[0]['date'])
else:
first_and_last_days.append(datetime.date(y, 1, 1))
else:
first_and_last_days.append(datetime.date(y, 1, 1))
first_and_last_days.append(datetime.date(y, 12, 31))
self.three_month_buffers = [self.three_by_year[d.year] for d in first_and_last_days if d.year in self.three_by_year]
self.nine_month_buffers = [self.nine_by_year[d.year] for d in first_and_last_days if d.year in self.nine_by_year]
except Exception,e:
print 'ERROR BUFFERS'
print e
if self.interactive:
import ipdb; ipdb.set_trace()
try:
# perform any required calculations before beginning fig
# construction, so they won't have to be repeated when
# constructing multiple figs
projected_ff_dates = []
projected_ff_levels = []
if self.calc_forecast_projection and (len(self.stocklevels) > 0):
projected_ff_dates, projected_ff_levels = self._calc_stock_levels(\
"FF", self.stocklevels[-1]['date'], self.stocklevels[-1]['amount'])
projected_fp_dates = []
projected_fp_levels = []
if self.calc_purchased_projection and (len(self.stocklevels) > 0):
projected_fp_dates, projected_fp_levels = self._calc_stock_levels(\
"FP", self.stocklevels[-1]['date'], self.stocklevels[-1]['amount'])
projected_co_dates = []
projected_co_levels = []
if self.calc_theoretical_forecast and (len(self.stocklevels) > 0):
projected_co_dates, projected_co_levels = self._calc_stock_levels(\
"CO", self.stocklevels[0]['date'], None, self.today)
projected_un_dates = []
projected_un_levels = []
if self.calc_adjusted_theoretical_forecast and (len(self.stocklevels) > 0):
projected_un_dates, projected_un_levels = self._calc_stock_levels(\
"UN", self.stocklevels[0]['date'], None, self.today)
except Exception, e:
print 'BANG calculations'
print e
try:
if self.dump:
sl = dict(zip(self.dates, self.levels))
ff = dict(zip(projected_ff_dates, projected_ff_levels))
fp = dict(zip(projected_fp_dates, projected_fp_levels))
co = dict(zip(projected_co_dates, projected_co_levels))
un = dict(zip(projected_un_dates, projected_un_levels))
all_years = set(self.f_years + self.ff_years + self.s_years)
first_date = datetime.date(min(all_years), 1, 1)
last_date = datetime.date(self.today.year, 12, 31)
possible_days = last_date - first_date
one_day = datetime.timedelta(days=1)
day_pointer = first_date
time_series = list()
# TODO always dump one with column names for export and one without for dygraph
if self.dump_column_names:
time_series.append(["date", "actual stock level", "3 month buffer stock level", "9 month overstock level", "future delivery on forecast", "future delivery on purchase", "original country office forecast", "unicef delivery"])
for day in range(possible_days.days):
this_day = day_pointer
td = this_day.isoformat()
if sl.has_key(this_day):
sto = sl[this_day]
else:
sto = ""
if self.three_by_year.has_key(this_day.year):
tmo = self.three_by_year[this_day.year]
else:
tmo = ""
if self.nine_by_year.has_key(this_day.year):
nmo = self.nine_by_year[this_day.year]
else:
nmo = ""
if ff.has_key(this_day):
ffl = str(ff[this_day])
if ffl == "0":
ffl = ""
else:
ffl = ""
if fp.has_key(this_day):
fpl = str(fp[this_day])
if fpl == "0":
fpl = ""
else:
fpl = ""
if co.has_key(this_day):
col = str(co[this_day])
if col == "0":
col = ""
else:
col = ""
if un.has_key(this_day):
unl = str(un[this_day])
if unl == "0":
unl = ""
else:
unl = ""
# date, stocklevel, 3mo buffer, 9mo overstock, future on forecast, future on po, orig country forecast, unicef delivery
time_series.append([td, sto, tmo, nmo, ffl, fpl, col, unl])
day_pointer = this_day + one_day
#file_name = "/tmp/%s_%s_all.csv" % (self.country_pk, self.group_slug)
country_code = self.country_pk
file_path = "/home/ubuntu/vax/vaxapp/static/csvs/%s/%s/%s/" % (country_code, self.group_slug, self.today.year)
file_name = "%s_%s_%s_%s_%s.csv" % (country_code, self.group_slug, self.today.year, self.today.month, self.today.day)
local_file = file_path + file_name
anon_country_code = "".join([str(letter_position(l)).zfill(2) for l in self.country_pk])
anon_file_path = "/home/ubuntu/vax/vaxapp/static/csvs/%s/%s/%s/" % (anon_country_code, self.group_slug, self.today.year)
anon_file_name = "%s_%s_%s_%s_%s.csv" % (anon_country_code, self.group_slug, self.today.year, self.today.month, self.today.day)
anon_local_file = anon_file_path + anon_file_name
try:
os.makedirs(file_path)
except OSError, e:
# don't raise if the path already exists,
# only if there is another error (permission, etc)
if e.errno != errno.EEXIST:
raise
with open(local_file, 'wb') as f:
csvwriter = csv.writer(f, delimiter=',')
csvwriter.writerows(time_series)
try:
os.makedirs(anon_file_path)
except OSError, e:
# don't raise if the path already exists,
# only if there is another error (permission, etc)
if e.errno != errno.EEXIST:
raise
with open(anon_local_file, 'wb') as f:
csvwriter = csv.writer(f, delimiter=',')
csvwriter.writerows(time_series)
if self.upload_csv_to_s3:
# TODO queue uploading with celery so uploading
# will not delay generation of next csv
try:
country_code = self.country_pk
if self.anon:
country_code = "".join([str(letter_position(l)).zfill(2) for l in self.country_pk])
s3_key = "%s_%s_%s_%s_%s.csv" % (country_code, self.group_slug, self.today.year, self.today.month, self.today.day)
s3_path = "%s/%s/%s/" % (country_code, self.group_slug, self.today.year)
upload_file(filename, 'vaxtrack_csv', s3_path + s3_key, True)
print s3_key
except Exception, e:
print 'ERROR UPLOADING'
print e
return
except Exception, e:
print 'BANG dump'
print e
if self.interactive:
import ipdb; ipdb.set_trace()
def analyze(self):
if not self.get_and_set_vars():
return False
print '~~~~ANALYZING~~~~'
print self.country_pk
print self.vaccine_abbr
print self.group_slug
print '~~~~~~~~~~~~~~~~~'
try:
last_s = {}
self.consumed_in_year = {}
# populate dicts with years and 0s
for y in self.s_years:
self.consumed_in_year.update({y:0})
last_s.update({y:0})
print self.s_years
for d in get_group_all_stocklevels_asc(self.country_pk, self.group_slug):
yr = int(d['year'])
s = int(d['amount'])
if not yr in self.s_years:
continue
# if this day's stocklevel is less than the last stocklevel...
if s <= last_s[yr]:
# amount consumed this day is difference between
# this day's stocklevel and the last one
consumed_today = last_s[yr] - s
# add this amount to this year's running total
consumed_this_year = self.consumed_in_year[yr] + consumed_today
# update dict with new sum
self.consumed_in_year.update({yr:consumed_this_year})
# set this day's stocklevel as last one and continue looping
last_s[yr] = s
print self.consumed_in_year
self.actual_cons_rate = {}
self.days_of_stock_data = {}
for y in self.s_years:
# get all stocklevel datapoints from year
stocklevels_in_year = get_group_type_for_year_asc(self.country_pk, self.group_slug, 'SL', y, 'unknown')
print len(stocklevels_in_year)
# find number of days enclosed between first stocklevel entry of year and last
if len(stocklevels_in_year) > 0:
self.days_of_stock_data.update({y:(stocklevels_in_year[-1]['date'] - stocklevels_in_year[0]['date']).days})
rate = float(self.consumed_in_year[y])/float(self.days_of_stock_data[y])
self.actual_cons_rate.update({y:int(rate)})
print self.actual_cons_rate
# "Query 2" Order Lead Time
# see if there are forecasted deliveries and/or purchased deliveries
# scheduled for the near future
print 'Query 2'
self.forecasted_this_year = get_group_type_for_year_asc(self.country_pk, self.group_slug, "FF", self.today.year)
self.on_po_this_year = get_group_type_for_year_asc(self.country_pk, self.group_slug, "FP", self.today.year)
self.upcoming_on_po = [d for d in self.on_po_this_year if ((d['date'] - self.today) <= self.lookahead)]
self.doses_on_orders = reduce(lambda s,d: s + d['amount'], self.on_po_this_year, 0)
self.upcoming_forecasted = [d for d in self.forecasted_this_year if ((d['date'] - self.today) <= self.lookahead)]
if self.today.year not in self.f_years:
# if there is no forecast for the reference date's year,
# don't perform any of these queries
self.analyzed = True
return
if len(self.stocklevels) == 0:
self.analyzed = True
return
else:
self.has_stock_data = True
# "Query 1" Forecast Accuracy
# for this year, see how actual consumption rate compares to estimated daily rate
print 'Query 1'
est_cons_rate = int(float(self.annual_demand[self.today.year])/float(365))
print est_cons_rate
if self.today.year in self.actual_cons_rate:
rate_difference = float(abs(est_cons_rate - self.actual_cons_rate[self.today.year]))/float(est_cons_rate)
print rate_difference
# flag if difference is greater than threshold
if rate_difference > self.cons_rate_diff_threshold:
print '***FLAG***'
print 'major difference between forecast and actual consumption rates'
handle_alert(countrystock=self.cs,\
reference_date=self.today, status='W', risk='F', text='C')
# "Query 3" Stock Management
# see how many months worth of supply are in stock
print 'Query 3'
latest_stocklevel = self.stocklevels[0]
if self.today.year in self.annual_demand:
self.est_daily_cons = int(float(self.annual_demand[self.today.year])/float(365))
self.days_of_stock = int(float(latest_stocklevel['amount'])/float(self.est_daily_cons))
print '%s days of stock' % str(self.days_of_stock)
# check if there is too much stock (more than nine months' worth)
if self.days_of_stock >= 270:
# "Query 4" Stock Management
# flag if there are any upcoming deliveries (forecasted or purchased)
print 'Query 4'
if (len(self.upcoming_forecasted) > 0) or (len(self.upcoming_on_po) > 0):
print '***FLAG***'
print 'delay or reduce shipment'
handle_alert(countrystock=self.cs,\
reference_date=self.today, status='U', risk='O', text='D')
else:
print '---OK---'
this_years_levels = Analysis.filter(self.stocklevels, 'year', self.today.year)
if len(this_years_levels) > 0:
self.first_level_this_year = Analysis.filter(self.stocklevels, 'year', self.today.year)[-1]['amount']
self.deliveries_this_year = get_group_type_for_year(self.country_pk, self.group_slug, "UN", self.today.year)
self.doses_delivered_this_year = reduce(lambda s,d: s + d['amount'], self.deliveries_this_year, 0)
#self.doses_on_orders = reduce(lambda s,d: s + d['amount'], self.upcoming_on_po, 0)
self.demand_for_period = self.lookahead.days * self.est_daily_cons
# "Query 5" Stock Management
# calculate % coverage of annual need
print 'Query 5'
self.percent_coverage = float(self.first_level_this_year + self.doses_delivered_this_year)/float(self.annual_demand[self.today.year])
print '%s percent coverage' % str(self.percent_coverage)
else:
self.percent_coverage = 0
self.demand_for_period = 0
# check if there is insufficient stock (less than three months' worth)
if self.days_of_stock <= 90:
if (self.percent_coverage >= (0.25 + float(self.today.month)/12.0)) and (self.percent_coverage <= (0.5 + float(self.today.month)/12.0)):
print '---OK---'
if self.percent_coverage < (0.25 + float(self.today.month)/12.0):
# "Query 7" Stock Management
if (len(self.upcoming_on_po) > 0):
if self.doses_on_orders < self.demand_for_period:
print '***FLAG***'
print 'risk of stockout'
print 'order immediately -- not enough on upcoming deliveries'
handle_alert(countrystock=self.cs,\
reference_date=self.today, status='U', risk='S', text='I')
else:
print '---OK---'
elif (len(self.upcoming_forecasted) > 0):
print '***FLAG***'
print 'risk of stockout'
print 'order immediately - purchase forecasted delivery'
handle_alert(countrystock=self.cs,\
reference_date=self.today, status='U', risk='S', text='F')
else:
print '***FLAG***'
print 'risk of stockout'
print 'order immediately - no supply on PO or forecasted for next 3 | |
import math
import itertools
from operator import itemgetter
import json
import os
import random
from .geom import hflip_pattern, vflip_pattern, rot_pattern
from .patterns import (
get_pattern_size,
get_pattern_livecount,
get_grid_empty,
get_grid_pattern,
segment_pattern,
methuselah_quadrants_pattern,
pattern_union,
cloud_region,
)
from .utils import pattern2url, retry_on_failure
from .error import GollyXPatternsError, GollyXMapsError
##############
# Util methods
def get_rainbow_pattern_function_map():
    """
    Return the lookup table mapping map-type names to the
    four-color pattern generator functions defined in this module.
    """
    # TODO: one more map type is still wanted here.
    entries = [
        ("rainbowmath", rainbowmath_fourcolor),
        ("rainbow", rainbow_fourcolor),
        ("sunburst", sunburst_fourcolor),
        ("quadgaussian", quadgaussian_fourcolor),
        ("random", random_fourcolor),
        ("timebomb", timebomb_fourcolor),
        ("timebombredux", timebomb2_fourcolor),
        ("randommethuselahs", randommethuselahs_fourcolor),
        ("crabs", crabs_fourcolor),
        ("patiolights", patiolights_fourcolor),
        ("orchard", orchard_fourcolor),
        ("justyna", justyna_fourcolor),
        ("rabbits", rabbits_fourcolor),
        ("multum", multum_fourcolor),
        ("eights", eightx_fourcolor),
    ]
    return dict(entries)
def rainbow_jitteryrow_pattern(rows, cols, seed=None, methuselah=None, spacing=None):
    """
    Place a jittered horizontal row of methuselahs across the middle of
    the grid, split into four vertical bands (one band per team).

    Parameters:
    - rows, cols: grid dimensions
    - seed: optional RNG seed for reproducible maps
    - methuselah: name of the methuselah shape to place (default "rheptomino")
    - spacing: characteristic horizontal spacing L between shapes (default 10)

    Returns a 4-tuple of patterns, one pattern union per team.
    """
    if seed is not None:
        random.seed(seed)
    # L is a characteristic length scale
    L = 10 if spacing is None else spacing
    if methuselah is None:
        methuselah = "rheptomino"
    centerx = cols // 2
    centery = rows // 2
    # Place one methuselah every L grid spaces,
    # up to the maximum multiple of 4 possible
    maxshapesperteam = (cols // 4) // L
    team_assignments = [0, 1, 2, 3]
    random.shuffle(team_assignments)
    rotdegs = [0, 90, 180, 270]
    patterns_list_all = [[], [], [], []]
    # Band layout, left to right:
    # +---------------+
    # |Q1 |Q2 |Q3 |Q4 |
    # | | | | |
    # +---------------+
    # Each band's x offset is (base + sign * jitter); the bands mirror
    # around centerx so everything stays centered.
    quadrant_bands = [
        (centerx - centerx // 2, -1),  # Q1
        (centerx, -1),                 # Q2
        (centerx, +1),                 # Q3
        (centerx + centerx // 2, +1),  # Q4
    ]
    for i in range(maxshapesperteam):
        end = (i + 1) * L
        start = end - L // 2
        for iq, (base, sign) in enumerate(quadrant_bands):
            # NOTE: keep the keyword-argument order below unchanged --
            # the random.* calls run in argument order, so reordering
            # would change the maps produced from a given seed.
            pattern = get_grid_pattern(
                methuselah,
                rows,
                cols,
                xoffset=base + sign * random.randint(start, end),
                yoffset=centery + random.randint(-L, L),
                hflip=bool(random.getrandbits(1)),
                vflip=bool(random.getrandbits(1)),
                rotdeg=random.choice(rotdegs),
            )
            patterns_list_all[team_assignments[iq]].append(pattern)
    pattern_unions = [pattern_union(pl) for pl in patterns_list_all]
    return tuple(pattern_unions)
def rainbow_methuselah_quadrants_pattern(
    rows, cols, seed=None, methuselah_counts=None, fixed_methuselah=None
):
    """
    Add methuselahs to each quadrant.

    If the user does not specify any args,
    this fills the quadrants with lots of
    small methuselahs.

    The user can specify which methuselahs
    to use and how many to use, so e.g.
    can specify 1 methuselah per quadrant, etc.

    Parameters:
    - rows, cols: grid dimensions
    - seed: optional RNG seed for reproducible maps
    - methuselah_counts: allowed per-quadrant counts, each in {1, 2, 3, 4, 9, 16}
    - fixed_methuselah: use only this named methuselah instead of a random mix

    Returns a 4-tuple of patterns, one per team; live cells are dealt to
    teams in a serpentine order to balance the starting cell counts.

    Raises GollyXPatternsError if an invalid count is passed or a
    methuselah cannot fit on the grid.
    """
    # set rng seed (optional)
    if seed is not None:
        random.seed(seed)
    small_methuselah_names = [
        "bheptomino",
        "cheptomino",
        "eheptomino",
        "piheptomino",
        "rpentomino",
    ]
    reg_methuselah_names = [
        "acorn",
        "bheptomino",
        "cheptomino",
        "eheptomino",
        "multuminparvo",
        "piheptomino",
        "rabbit",
        "rpentomino",
    ]
    BIGDIMLIMIT = 150
    mindim = min(rows, cols)
    # Larger boards get denser defaults and are restricted to the small
    # methuselahs (big ones become overwhelming at that scale).
    if methuselah_counts is None:
        if mindim < BIGDIMLIMIT:
            methuselah_counts = [3, 4, 9]
        else:
            methuselah_counts = [3, 4, 9, 16]
    if fixed_methuselah is None:
        if mindim < BIGDIMLIMIT:
            methuselah_names = reg_methuselah_names + small_methuselah_names
        else:
            methuselah_names = small_methuselah_names
    else:
        methuselah_names = [fixed_methuselah]
    valid_mc = [1, 2, 3, 4, 9, 16]
    for mc in methuselah_counts:
        if mc not in valid_mc:
            # BUGFIX: these were plain strings containing f-string syntax,
            # so nothing was interpolated (and joining ints would raise).
            msg = f"Invalid methuselah counts passed: must be in {', '.join(str(m) for m in valid_mc)}\n"
            msg += f"you specified {', '.join(str(m) for m in methuselah_counts)}"
            raise GollyXPatternsError(msg)
    # Put a cluster of methuselahs in each quadrant,
    # one quadrant per team.
    # Procedure:
    # place random methuselah patterns in each quadrant corner
    # Store each quadrant and its upper left corner in (rows from top, cols from left) format
    quadrants = [
        (1, (0, cols // 2)),
        (2, (0, 0)),
        (3, (rows // 2, 0)),
        (4, (rows // 2, cols // 2)),
    ]
    rotdegs = [0, 90, 180, 270]
    all_methuselahs = []
    for iq, quad in enumerate(quadrants):
        count = random.choice(methuselah_counts)
        if count == 1:
            # Only one methuselah in this quadrant, so use the center
            jitterx = 4
            jittery = 4
            corner = quadrants[iq][1]
            y = corner[0] + rows // 4 + random.randint(-jittery, jittery)
            x = corner[1] + cols // 4 + random.randint(-jitterx, jitterx)
            meth = random.choice(methuselah_names)
            pattern = get_grid_pattern(
                meth,
                rows,
                cols,
                xoffset=x,
                yoffset=y,
                hflip=bool(random.getrandbits(1)),
                vflip=bool(random.getrandbits(1)),
                rotdeg=random.choice(rotdegs),
            )
            livecount = get_pattern_livecount(meth)
            all_methuselahs.append((livecount, pattern))
        elif count == 2 or count == 4:
            # Two or four methuselahs in this quadrant, so place at corners of a square
            # Form the square by cutting the quadrant into thirds
            if count == 4:
                jitterx = 3
                jittery = 3
            else:
                jitterx = 5
                jittery = 5
            corner = quadrants[iq][1]
            # Slices and partitions form the inside square
            nslices = 2
            nparts = nslices + 1
            # For count == 2, pick one diagonal of the square at random
            posdiag = bool(random.getrandbits(1))
            for a in range(1, nparts):
                for b in range(1, nparts):
                    proceed = False
                    if count == 2:
                        if (posdiag and a == b) or (
                            not posdiag and a == (nslices - b + 1)
                        ):
                            proceed = True
                    elif count == 4:
                        proceed = True
                    if proceed:
                        y = (
                            corner[0]
                            + a * ((rows // 2) // nparts)
                            + random.randint(-jittery, jittery)
                        )
                        x = (
                            corner[1]
                            + b * ((cols // 2) // nparts)
                            + random.randint(-jitterx, jitterx)
                        )
                        meth = random.choice(methuselah_names)
                        try:
                            pattern = get_grid_pattern(
                                meth,
                                rows,
                                cols,
                                xoffset=x,
                                yoffset=y,
                                hflip=bool(random.getrandbits(1)),
                                vflip=bool(random.getrandbits(1)),
                                rotdeg=random.choice(rotdegs),
                            )
                        except GollyXPatternsError:
                            raise GollyXPatternsError(
                                f"Error with methuselah {meth}: cannot fit"
                            )
                        livecount = get_pattern_livecount(meth)
                        all_methuselahs.append((livecount, pattern))
        elif count == 3 or count == 9:
            # Three or nine methuselahs, place these on a square with three points per side
            # or eight points total
            if count == 9:
                jitterx = 3
                jittery = 3
            else:
                jitterx = 5
                jittery = 5
            corner = quadrants[iq][1]
            nslices = 4
            for a in range(1, nslices):
                for b in range(1, nslices):
                    proceed = False
                    if count == 3:
                        if a == b:
                            proceed = True
                    elif count == 9:
                        proceed = True
                    if proceed:
                        y = (
                            corner[0]
                            + a * ((rows // 2) // nslices)
                            + random.randint(-jittery, jittery)
                        )
                        x = (
                            corner[1]
                            + b * ((cols // 2) // nslices)
                            + random.randint(-jitterx, jitterx)
                        )
                        meth = random.choice(methuselah_names)
                        try:
                            pattern = get_grid_pattern(
                                meth,
                                rows,
                                cols,
                                xoffset=x,
                                yoffset=y,
                                hflip=bool(random.getrandbits(1)),
                                vflip=bool(random.getrandbits(1)),
                                rotdeg=random.choice(rotdegs),
                            )
                        except GollyXPatternsError:
                            raise GollyXPatternsError(
                                f"Error with methuselah {meth}: cannot fit"
                            )
                        livecount = get_pattern_livecount(meth)
                        all_methuselahs.append((livecount, pattern))
        elif count == 16:
            # Sixteen methuselahs, place these on a 4x4 square
            jitterx = 2
            jittery = 2
            corner = quadrants[iq][1]
            nslices = 5
            for a in range(1, nslices):
                for b in range(1, nslices):
                    y = (
                        corner[0]
                        + a * ((rows // 2) // nslices)
                        + random.randint(-jittery, jittery)
                    )
                    x = (
                        corner[1]
                        + b * ((cols // 2) // nslices)
                        + random.randint(-jitterx, jitterx)
                    )
                    meth = random.choice(methuselah_names)
                    try:
                        pattern = get_grid_pattern(
                            meth,
                            rows,
                            cols,
                            xoffset=x,
                            yoffset=y,
                            hflip=bool(random.getrandbits(1)),
                            vflip=bool(random.getrandbits(1)),
                            rotdeg=random.choice(rotdegs),
                        )
                    except GollyXPatternsError:
                        raise GollyXPatternsError(
                            f"Error with methuselah {meth}: cannot fit"
                        )
                    livecount = get_pattern_livecount(meth)
                    all_methuselahs.append((livecount, pattern))
    random.shuffle(all_methuselahs)
    # Sort by number of live cells
    all_methuselahs.sort(key=itemgetter(0), reverse=True)
    team1_patterns = []
    team2_patterns = []
    team3_patterns = []
    team4_patterns = []
    # Deal patterns to teams serpentine-style (1,2,3,4,4,3,2,1,...) so the
    # live-cell totals stay roughly balanced.
    asc = [1, 2, 3, 4]
    ascrev = list(reversed(asc))
    serpentine_pattern = asc + ascrev
    for i, (_, methuselah_pattern) in enumerate(all_methuselahs):
        serpix = i % len(serpentine_pattern)
        serpteam = serpentine_pattern[serpix]
        if serpteam == 1:
            team1_patterns.append(methuselah_pattern)
        elif serpteam == 2:
            team2_patterns.append(methuselah_pattern)
        elif serpteam == 3:
            team3_patterns.append(methuselah_pattern)
        elif serpteam == 4:
            team4_patterns.append(methuselah_pattern)
    team1_pattern = pattern_union(team1_patterns)
    team2_pattern = pattern_union(team2_patterns)
    team3_pattern = pattern_union(team3_patterns)
    team4_pattern = pattern_union(team4_patterns)
    return team1_pattern, team2_pattern, team3_pattern, team4_pattern
#############
# Map methods
def random_fourcolor(rows, cols, seed=None):
"""
Generate a random four-color list life initialization.
Returns: four listlife strings,
with the random initializations.
(8-20% of all cells are alive).
Strategy: generate a set of (x,y) tuples,
convert to list, split in four. Use those
point sets to create listLife URL strings.
"""
if seed is not None:
random.seed(seed)
density = random.randint(8, 18) / 100.0
ncells = rows * cols
nlivecells = 4 * ((density * ncells) // 4)
points = set()
while len(points) < nlivecells:
randy = | |
attributes
compare_system_and_attributes_csv(self, '03')
def test_system_importer_file_csv_upload_post_minimal_headline(self):
"""test importer view"""
# change config
set_config_headline()
# login testuser
self.client.login(
username='testuser_system_importer_file_csv_minimal',
password='<PASSWORD>',
)
# open upload file
systemcsv = open(
os.path.join(
BASE_DIR,
'dfirtrack_main/tests/system_importer/system_importer_file_csv_files/system_importer_file_csv_testfile_03_minimal_headline.csv',
)
)
# create post data
data_dict = {
'systemcsv': systemcsv,
}
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.post('/system/importer/file/csv/upload/', data_dict)
# get messages
messages = list(get_messages(response.wsgi_request))
# close file
systemcsv.close()
# compare - meta
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
# compare - messages
compare_messages_csv(self, messages)
# compare - systems / attributes
compare_system_and_attributes_csv(self, '03')
""" field delimiter - comma """
def test_system_importer_file_csv_cron_minimal_comma(self):
"""test importer view"""
# change config
set_csv_import_filename(
'system_importer_file_csv_testfile_21_minimal_comma.csv'
)
# change config
set_config_field_delimiter_comma()
# mock timezone.now()
t_5 = datetime(2021, 3, 7, 21, 12, tzinfo=timezone.utc)
with patch.object(timezone, 'now', return_value=t_5):
# execute cron job / scheduled task
system_cron()
# login testuser
self.client.login(
username='testuser_system_importer_file_csv_minimal',
password='<PASSWORD>',
)
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare - user 1
self.assertEqual(
str(response.context['user']), 'testuser_system_importer_file_csv_minimal'
)
self.assertEqual(
messages[0].message,
'System CSV importer: created: 3 | updated: 0 | skipped: 0 | multiple: 0 [2021-03-07 21:12:00 - 2021-03-07 21:12:00]',
)
self.assertEqual(messages[0].level_tag, 'success')
# switch user context
self.client.logout()
self.client.login(username='message_user', password='<PASSWORD>')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare - user 2
self.assertEqual(str(response.context['user']), 'message_user')
self.assertEqual(
messages[0].message,
'System CSV importer: created: 3 | updated: 0 | skipped: 0 | multiple: 0 [2021-03-07 21:12:00 - 2021-03-07 21:12:00]',
)
self.assertEqual(messages[0].level_tag, 'success')
# compare - systems / attributes
compare_system_and_attributes_csv(self, '21')
# compare domain (delimiter specific)
compare_delimiter_specific(self, '21')
def test_system_importer_file_csv_instant_minimal_comma(self):
"""test importer view"""
# change config
set_csv_import_filename(
'system_importer_file_csv_testfile_21_minimal_comma.csv'
)
# change config
set_config_field_delimiter_comma()
# login testuser
self.client.login(
username='testuser_system_importer_file_csv_minimal',
password='<PASSWORD>',
)
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.get('/system/importer/file/csv/instant/', follow=True)
# get messages
messages = list(get_messages(response.wsgi_request))
# compare - meta
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
# compare - messages
compare_messages_csv(self, messages)
# compare - systems / attributes
compare_system_and_attributes_csv(self, '21')
# compare domain (delimiter specific)
compare_delimiter_specific(self, '21')
def test_system_importer_file_csv_upload_post_minimal_comma(self):
"""test importer view"""
# change config
set_config_field_delimiter_comma()
# login testuser
self.client.login(
username='testuser_system_importer_file_csv_minimal',
password='<PASSWORD>',
)
# open upload file
systemcsv = open(
os.path.join(
BASE_DIR,
'dfirtrack_main/tests/system_importer/system_importer_file_csv_files/system_importer_file_csv_testfile_21_minimal_comma.csv',
)
)
# create post data
data_dict = {
'systemcsv': systemcsv,
}
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.post('/system/importer/file/csv/upload/', data_dict)
# get messages
messages = list(get_messages(response.wsgi_request))
# close file
systemcsv.close()
# compare - meta
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
# compare - messages
compare_messages_csv(self, messages)
# compare - systems / attributes
compare_system_and_attributes_csv(self, '21')
# compare domain (delimiter specific)
compare_delimiter_specific(self, '21')
""" field delimiter - semicolon """
def test_system_importer_file_csv_cron_minimal_semicolon(self):
"""test importer view"""
# change config
set_csv_import_filename(
'system_importer_file_csv_testfile_22_minimal_semicolon.csv'
)
# change config
set_config_field_delimiter_semicolon()
# mock timezone.now()
t_6 = datetime(2021, 3, 7, 21, 17, tzinfo=timezone.utc)
with patch.object(timezone, 'now', return_value=t_6):
# execute cron job / scheduled task
system_cron()
# login testuser
self.client.login(
username='testuser_system_importer_file_csv_minimal',
password='<PASSWORD>',
)
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare - user 1
self.assertEqual(
str(response.context['user']), 'testuser_system_importer_file_csv_minimal'
)
self.assertEqual(
messages[0].message,
'System CSV importer: created: 3 | updated: 0 | skipped: 0 | multiple: 0 [2021-03-07 21:17:00 - 2021-03-07 21:17:00]',
)
self.assertEqual(messages[0].level_tag, 'success')
# switch user context
self.client.logout()
self.client.login(username='message_user', password='<PASSWORD>')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare - user 2
self.assertEqual(str(response.context['user']), 'message_user')
self.assertEqual(
messages[0].message,
'System CSV importer: created: 3 | updated: 0 | skipped: 0 | multiple: 0 [2021-03-07 21:17:00 - 2021-03-07 21:17:00]',
)
self.assertEqual(messages[0].level_tag, 'success')
# compare - systems / attributes
compare_system_and_attributes_csv(self, '22')
# compare domain (delimiter specific)
compare_delimiter_specific(self, '22')
def test_system_importer_file_csv_instant_minimal_semicolon(self):
"""test importer view"""
# change config
set_csv_import_filename(
'system_importer_file_csv_testfile_22_minimal_semicolon.csv'
)
# change config
set_config_field_delimiter_semicolon()
# login testuser
self.client.login(
username='testuser_system_importer_file_csv_minimal',
password='<PASSWORD>',
)
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.get('/system/importer/file/csv/instant/', follow=True)
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
# compare - messages
compare_messages_csv(self, messages)
# compare - systems / attributes
compare_system_and_attributes_csv(self, '22')
# compare domain (delimiter specific)
compare_delimiter_specific(self, '22')
def test_system_importer_file_csv_upload_post_minimal_semicolon(self):
"""test importer view"""
# change config
set_config_field_delimiter_semicolon()
# login testuser
self.client.login(
username='testuser_system_importer_file_csv_minimal',
password='<PASSWORD>',
)
# open upload file
systemcsv = open(
os.path.join(
BASE_DIR,
'dfirtrack_main/tests/system_importer/system_importer_file_csv_files/system_importer_file_csv_testfile_22_minimal_semicolon.csv',
)
)
# create post data
data_dict = {
'systemcsv': systemcsv,
}
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.post('/system/importer/file/csv/upload/', data_dict)
# get messages
messages = list(get_messages(response.wsgi_request))
# close file
systemcsv.close()
# compare - meta
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
# compare - messages
compare_messages_csv(self, messages)
# compare - systems / attributes
compare_system_and_attributes_csv(self, '22')
# compare domain (delimiter specific)
compare_delimiter_specific(self, '22')
""" ip delimiter - comma """
def test_system_importer_file_csv_cron_ip_delimiter_comma(self):
"""test importer view"""
# change config
set_csv_import_filename(
'system_importer_file_csv_testfile_51_ip_delimiter_comma.csv'
)
# change config
set_config_ip_delimiter_comma()
# mock timezone.now()
t_7 = datetime(2021, 3, 21, 19, 35, tzinfo=timezone.utc)
with patch.object(timezone, 'now', return_value=t_7):
# execute cron job / scheduled task
system_cron()
# login testuser
self.client.login(
username='testuser_system_importer_file_csv_minimal',
password='<PASSWORD>',
)
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare - user 1
self.assertEqual(
str(response.context['user']), 'testuser_system_importer_file_csv_minimal'
)
self.assertEqual(
messages[0].message,
'System CSV importer: created: 3 | updated: 0 | skipped: 0 | multiple: 0 [2021-03-21 19:35:00 - 2021-03-21 19:35:00]',
)
self.assertEqual(messages[0].level_tag, 'success')
# switch user context
self.client.logout()
self.client.login(username='message_user', password='<PASSWORD>')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare - user 2
self.assertEqual(str(response.context['user']), 'message_user')
self.assertEqual(
messages[0].message,
'System CSV importer: created: 3 | updated: 0 | skipped: 0 | multiple: 0 [2021-03-21 19:35:00 - 2021-03-21 19:35:00]',
)
self.assertEqual(messages[0].level_tag, 'success')
# compare - systems / attributes
compare_system_and_attributes_csv(self, '51')
# compare - IPs
compare_ips(self, '51')
def test_system_importer_file_csv_instant_ip_delimiter_comma(self):
"""test importer view"""
# change config
set_csv_import_filename(
'system_importer_file_csv_testfile_51_ip_delimiter_comma.csv'
)
# change config
set_config_ip_delimiter_comma()
# login testuser
self.client.login(
username='testuser_system_importer_file_csv_minimal',
password='<PASSWORD>',
)
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.get('/system/importer/file/csv/instant/', follow=True)
# get messages
messages = list(get_messages(response.wsgi_request))
# compare - meta
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
# compare - messages
compare_messages_csv(self, messages)
# compare - systems / attributes
compare_system_and_attributes_csv(self, '51')
# compare - IPs
compare_ips(self, '51')
def test_system_importer_file_csv_upload_post_ip_delimiter_comma(self):
"""test importer view"""
# change config
set_config_ip_delimiter_comma()
# login testuser
self.client.login(
username='testuser_system_importer_file_csv_minimal',
password='<PASSWORD>',
)
# open upload file
systemcsv = open(
os.path.join(
BASE_DIR,
'dfirtrack_main/tests/system_importer/system_importer_file_csv_files/system_importer_file_csv_testfile_51_ip_delimiter_comma.csv',
)
)
# create post data
data_dict = {
'systemcsv': systemcsv,
}
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.post('/system/importer/file/csv/upload/', data_dict)
# get messages
messages = list(get_messages(response.wsgi_request))
# close file
systemcsv.close()
# compare - meta
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
# compare - messages
compare_messages_csv(self, messages)
# compare - systems / attributes
compare_system_and_attributes_csv(self, '51')
# compare - IPs
compare_ips(self, '51')
""" ip delimiter - semicolon """
def test_system_importer_file_csv_cron_ip_delimiter_semicolon(self):
"""test importer view"""
# change config
set_csv_import_filename(
'system_importer_file_csv_testfile_52_ip_delimiter_semicolon.csv'
)
# change config
set_config_ip_delimiter_semicolon()
# mock timezone.now()
t_8 = datetime(2021, 3, 21, 19, 40, tzinfo=timezone.utc)
with patch.object(timezone, 'now', return_value=t_8):
# execute cron job / scheduled task
system_cron()
# login testuser
self.client.login(
username='testuser_system_importer_file_csv_minimal',
password='<PASSWORD>',
)
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare - user 1
self.assertEqual(
str(response.context['user']), 'testuser_system_importer_file_csv_minimal'
)
self.assertEqual(
messages[0].message,
'System CSV importer: created: 3 | updated: 0 | skipped: 0 | multiple: 0 [2021-03-21 19:40:00 - 2021-03-21 19:40:00]',
)
self.assertEqual(messages[0].level_tag, 'success')
# switch user context
self.client.logout()
self.client.login(username='message_user', password='<PASSWORD>')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare - user 2
self.assertEqual(str(response.context['user']), 'message_user')
self.assertEqual(
messages[0].message,
'System CSV importer: created: 3 | updated: 0 | skipped: 0 | multiple: 0 [2021-03-21 19:40:00 - 2021-03-21 19:40:00]',
)
self.assertEqual(messages[0].level_tag, 'success')
# compare - systems / attributes
compare_system_and_attributes_csv(self, '52')
# compare - IPs
compare_ips(self, '52')
def test_system_importer_file_csv_instant_ip_delimiter_semicolon(self):
"""test importer view"""
# change config
set_csv_import_filename(
'system_importer_file_csv_testfile_52_ip_delimiter_semicolon.csv'
)
# change config
set_config_ip_delimiter_semicolon()
# login testuser
self.client.login(
username='testuser_system_importer_file_csv_minimal',
password='<PASSWORD>',
)
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.get('/system/importer/file/csv/instant/', follow=True)
# get messages
messages = list(get_messages(response.wsgi_request))
# compare - meta
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
# compare - | |
multivariate Laurent polynomial
during conversion.
INPUT:
- ``P`` -- the parent to which we want to convert.
- ``M`` -- the parent from which we want to convert.
- ``d`` -- a dictionary mapping tuples (representing the exponents)
to their coefficients. This is the dictionary corresponding to
an element of ``M``.
OUTPUT:
A dictionary corresponding to an element of ``P``.
TESTS::
sage: L.<a, b, c, d> = LaurentPolynomialRing(ZZ)
sage: M = LaurentPolynomialRing(ZZ, 'c, d')
sage: N = LaurentPolynomialRing(M, 'a, b')
sage: M(c/d + 1/c) # indirect doctest
c*d^-1 + c^-1
sage: N(a + b/c/d + 1/b) # indirect doctest
a + (c^-1*d^-1)*b + b^-1
"""
vars_P = P.variable_names()
vars_M = M.variable_names()
if not set(vars_M) & set(vars_P):
raise TypeError('no common variables')
def index(T, value):
try:
return T.index(value)
except ValueError:
return None
def value(d, R):
assert d
if len(d) == 1:
k, v = next(iter(d.items()))
if all(i == 0 for i in k):
return R(v)
return R(M(d))
group_by = tuple(index(vars_M, var) for var in vars_P)
indices = list(range(len(vars_M)))
for g in group_by:
if g is not None:
indices[g] = None
D = _split_dict_(d, indices, group_by)
try:
return {k: value(v, P.base_ring()) for k, v in D.items()}
except (ValueError, TypeError):
pass
return sum(P({k: 1}) * value(v, P) for k, v in D.items()).dict()
class LaurentPolynomialRing_generic(CommutativeRing, Parent):
"""
Laurent polynomial ring (base class).
EXAMPLES:
This base class inherits from :class:`~sage.rings.ring.CommutativeRing`.
Since :trac:`11900`, it is also initialised as such::
sage: R.<x1,x2> = LaurentPolynomialRing(QQ)
sage: R.category()
Join of Category of unique factorization domains and Category of commutative algebras over (number fields and quotient fields and metric spaces) and Category of infinite sets
sage: TestSuite(R).run()
"""
    def __init__(self, R):
        """
        Initialize this Laurent polynomial ring on top of the ordinary
        (multivariate or univariate) polynomial ring ``R``.

        EXAMPLES::

            sage: R = LaurentPolynomialRing(QQ,2,'x')
            sage: R == loads(dumps(R))
            True
        """
        # Cache the generator count and keep a handle on the underlying
        # polynomial ring; most operations delegate to self._R.
        self._n = R.ngens()
        self._R = R
        names = R.variable_names()
        # NOTE(review): the one element is cached before the parent/ring
        # machinery is initialized below -- presumably element_class only
        # needs self._R (set above); confirm before reordering.
        self._one_element = self.element_class(self, R.one())
        CommutativeRing.__init__(self, R.base_ring(), names=names,
                category=R.category())
def ngens(self):
"""
Return the number of generators of ``self``.
EXAMPLES::
sage: LaurentPolynomialRing(QQ,2,'x').ngens()
2
sage: LaurentPolynomialRing(QQ,1,'x').ngens()
1
"""
return self._n
def gen(self, i=0):
r"""
Returns the `i^{th}` generator of self. If i is not specified, then
the first generator will be returned.
EXAMPLES::
sage: LaurentPolynomialRing(QQ,2,'x').gen()
x0
sage: LaurentPolynomialRing(QQ,2,'x').gen(0)
x0
sage: LaurentPolynomialRing(QQ,2,'x').gen(1)
x1
TESTS::
sage: LaurentPolynomialRing(QQ,2,'x').gen(3)
Traceback (most recent call last):
...
ValueError: generator not defined
"""
if i < 0 or i >= self._n:
raise ValueError("generator not defined")
try:
return self.__generators[i]
except AttributeError:
self.__generators = tuple(self(x) for x in self._R.gens())
return self.__generators[i]
def variable_names_recursive(self, depth=infinity):
r"""
Return the list of variable names of this ring and its base rings,
as if it were a single multi-variate Laurent polynomial.
INPUT:
- ``depth`` -- an integer or :mod:`Infinity <sage.rings.infinity>`.
OUTPUT:
A tuple of strings.
EXAMPLES::
sage: T = LaurentPolynomialRing(QQ, 'x')
sage: S = LaurentPolynomialRing(T, 'y')
sage: R = LaurentPolynomialRing(S, 'z')
sage: R.variable_names_recursive()
('x', 'y', 'z')
sage: R.variable_names_recursive(2)
('y', 'z')
"""
if depth <= 0:
return ()
elif depth == 1:
return self.variable_names()
else:
my_vars = self.variable_names()
try:
return self.base_ring().variable_names_recursive(depth - len(my_vars)) + my_vars
except AttributeError:
return my_vars
def is_integral_domain(self, proof = True):
"""
Returns True if self is an integral domain.
EXAMPLES::
sage: LaurentPolynomialRing(QQ,2,'x').is_integral_domain()
True
The following used to fail; see :trac:`7530`::
sage: L = LaurentPolynomialRing(ZZ, 'X')
sage: L['Y']
Univariate Polynomial Ring in Y over Univariate Laurent Polynomial Ring in X over Integer Ring
"""
return self.base_ring().is_integral_domain(proof)
def is_noetherian(self):
"""
Returns True if self is Noetherian.
EXAMPLES::
sage: LaurentPolynomialRing(QQ,2,'x').is_noetherian()
Traceback (most recent call last):
...
NotImplementedError
"""
raise NotImplementedError
def construction(self):
"""
Return the construction of ``self``.
EXAMPLES::
sage: LaurentPolynomialRing(QQ,2,'x,y').construction()
(LaurentPolynomialFunctor,
Univariate Laurent Polynomial Ring in x over Rational Field)
"""
from sage.categories.pushout import LaurentPolynomialFunctor
vars = self.variable_names()
if len(vars) == 1:
return LaurentPolynomialFunctor(vars[0], False), self.base_ring()
else:
return LaurentPolynomialFunctor(vars[-1], True), LaurentPolynomialRing(self.base_ring(), vars[:-1])
def completion(self, p, prec=20, extras=None):
"""
EXAMPLES::
sage: P.<x>=LaurentPolynomialRing(QQ)
sage: P
Univariate Laurent Polynomial Ring in x over Rational Field
sage: PP=P.completion(x)
sage: PP
Laurent Series Ring in x over Rational Field
sage: f=1-1/x
sage: PP(f)
-x^-1 + 1
sage: 1/PP(f)
-x - x^2 - x^3 - x^4 - x^5 - x^6 - x^7 - x^8 - x^9 - x^10 - x^11 - x^12 - x^13 - x^14 - x^15 - x^16 - x^17 - x^18 - x^19 - x^20 + O(x^21)
TESTS:
Check that the precision is taken into account (:trac:`24431`)::
sage: L = LaurentPolynomialRing(QQ, 'x')
sage: L.completion('x', 100).default_prec()
100
sage: L.completion('x', 20).default_prec()
20
"""
if str(p) == self._names[0] and self._n == 1:
from sage.rings.laurent_series_ring import LaurentSeriesRing
R = self.polynomial_ring().completion(self._names[0], prec)
return LaurentSeriesRing(R)
else:
raise TypeError("Cannot complete %s with respect to %s" % (self, p))
def remove_var(self, var):
"""
EXAMPLES::
sage: R = LaurentPolynomialRing(QQ,'x,y,z')
sage: R.remove_var('x')
Multivariate Laurent Polynomial Ring in y, z over Rational Field
sage: R.remove_var('x').remove_var('y')
Univariate Laurent Polynomial Ring in z over Rational Field
"""
vars = list(self.variable_names())
vars.remove(str(var))
return LaurentPolynomialRing(self.base_ring(), vars)
    def _coerce_map_from_(self, R):
        """
        Return a coercion map from ``R`` into this ring, or ``None``
        (implicitly) when no coercion exists.

        EXAMPLES::

            sage: L.<x,y> = LaurentPolynomialRing(QQ)
            sage: L.coerce_map_from(QQ)
            Generic morphism:
              From: Rational Field
              To:   Multivariate Laurent Polynomial Ring in x, y over Rational Field

        Let us check that coercion between Laurent Polynomials over
        different base rings works (:trac:`15345`)::

            sage: R = LaurentPolynomialRing(ZZ, 'x')
            sage: T = LaurentPolynomialRing(QQ, 'x')
            sage: R.gen() + 3*T.gen()
            4*x
        """
        # The underlying polynomial ring coerces in directly.
        if R is self._R:
            return self._generic_coerce_map(R)
        # Anything that coerces into the underlying polynomial ring
        # coerces in via it.
        f = self._coerce_map_via([self._R], R)
        if f is not None:
            return f
        # A Laurent polynomial ring whose underlying polynomial ring
        # coerces into ours (e.g. same variables, smaller base ring).
        if (isinstance(R, LaurentPolynomialRing_generic)
            and self._R.has_coerce_map_from(R._R)):
            return self._generic_coerce_map(R)
        # Falling off the end returns None: no coercion.
def __eq__(self, right):
"""
Check whether ``self`` is equal to ``right``.
EXAMPLES::
sage: R = LaurentPolynomialRing(QQ,'x,y,z')
sage: P = LaurentPolynomialRing(ZZ,'x,y,z')
sage: Q = LaurentPolynomialRing(QQ,'x,y')
sage: R == R
True
sage: R == Q
False
sage: Q == P
False
sage: P == R
False
"""
if type(self) != type(right):
return False
return self._R == right._R
def __ne__(self, other):
    """
    Check whether ``self`` is not equal to ``other``.

    Defined as the negation of ``__eq__``.

    EXAMPLES::

        sage: R = LaurentPolynomialRing(QQ,'x,y,z')
        sage: P = LaurentPolynomialRing(ZZ,'x,y,z')
        sage: Q = LaurentPolynomialRing(QQ,'x,y')
        sage: R != R
        False
        sage: R != Q
        True
        sage: Q != P
        True
        sage: P != R
        True
    """
    is_equal = (self == other)
    return not is_equal
def __hash__(self):
    """
    Return the hash of ``self``.

    The hash of the underlying polynomial ring is mixed with a fixed salt
    so Laurent rings do not collide with their polynomial rings.

    EXAMPLES::

        sage: h1 = hash(LaurentPolynomialRing(ZZ,'x,y,z'))
        sage: h2 = hash(LaurentPolynomialRing(ZZ,'x,y,z'))
        sage: h3 = hash(LaurentPolynomialRing(QQ,'x,y,z'))
        sage: h4 = hash(LaurentPolynomialRing(ZZ,'x,y'))
        sage: h1 == h2 and h1 != h3 and h1 != h4
        True
    """
    salt = 12059065606945654693
    return salt ^ hash(self._R)
def _latex_(self):
    r"""
    Return a LaTeX representation: base ring followed by the generators,
    each decorated with ``^{\pm 1}``.

    EXAMPLES::

        sage: latex(LaurentPolynomialRing(QQ,2,'x'))
        \Bold{Q}[x_{0}^{\pm 1}, x_{1}^{\pm 1}]
    """
    decorated = [name + r'^{\pm 1}' for name in self.latex_variable_names()]
    return "%s[%s]" % (latex(self.base_ring()), ', '.join(decorated))
def _ideal_class_(self, n=0):
    """
    Ideals in Laurent polynomial rings are not implemented.

    EXAMPLES::

        sage: LaurentPolynomialRing(QQ,2,'x')._ideal_class_()
        Traceback (most recent call last):
        ...
        NotImplementedError
    """
    # One may eventually want ideals in these guys.
    raise NotImplementedError
def ideal(self, *args, **kwds):
    """
    Ideals of Laurent polynomial rings are not supported; always raises
    ``NotImplementedError``.

    EXAMPLES::

        sage: LaurentPolynomialRing(QQ,2,'x').ideal([1])
        Traceback (most recent call last):
        ...
        NotImplementedError

    TESTS:

    check that :trac:`26421` is fixed:

        sage: R.<t> = LaurentPolynomialRing(ZZ)
        sage: P.<x> = PolynomialRing(R)
        sage: p = x-t
        sage: p.content_ideal()  # indirect doctest
        Traceback (most recent call last):
        ...
        NotImplementedError
    """
    raise NotImplementedError
def _is_valid_homomorphism_(self, codomain, im_gens, base_map=None):
    """
    Check whether mapping the generators to ``im_gens`` defines a valid
    ring homomorphism into ``codomain``.

    EXAMPLES::

        sage: T.<t> = ZZ[]
        sage: K.<i> = NumberField(t^2 + 1)
        sage: L.<x,y> = LaurentPolynomialRing(K)
        sage: L._is_valid_homomorphism_(K, (K(1/2), K(3/2)))
        True
        sage: Q5 = Qp(5); i5 = Q5(-1).sqrt()
        sage: L._is_valid_homomorphism_(Q5, (Q5(1/2), Q5(3/2)))  # no coercion
        False
        sage: L._is_valid_homomorphism_(Q5, (Q5(1/2), Q5(3/2)), base_map=K.hom([i5]))
        True
    """
    if base_map is None and not codomain.has_coerce_map_from(self.base_ring()):
        # Without an explicit base_map, elements of the base ring must
        # canonically coerce into the codomain.
        return False
    # The generators are units here, so each image must be invertible.
    return all(image.is_unit() for image in im_gens)
def term_order(self):
    """
    Return the term order of self, delegated to the underlying
    polynomial ring.

    EXAMPLES::

        sage: LaurentPolynomialRing(QQ,2,'x').term_order()
        Degree reverse lexicographic term order
    """
    underlying_ring = self._R
    return underlying_ring.term_order()
def is_finite(self):
    """
    Return ``False``: a Laurent polynomial ring is never finite.

    EXAMPLES::

        sage: LaurentPolynomialRing(QQ,2,'x').is_finite()
        False
    """
    return False
def is_field(self, proof = True):
    """
    Return ``False``: a Laurent polynomial ring is never a field.

    EXAMPLES::

        sage: LaurentPolynomialRing(QQ,2,'x').is_field()
        False
    """
    return False
def polynomial_ring(self):
    """
    Return the (ordinary) polynomial ring underlying this Laurent
    polynomial ring.

    EXAMPLES::

        sage: LaurentPolynomialRing(QQ,2,'x').polynomial_ring()
        Multivariate Polynomial Ring in x0, x1 over Rational Field
        sage: LaurentPolynomialRing(QQ,1,'x').polynomial_ring()
        Multivariate Polynomial Ring in x over Rational Field
    """
    return self._R
def characteristic(self):
"""
| |
<reponame>jiayiliu/gradio
"""
This module defines various classes that can serve as the `input` to an interface. Each class must inherit from
`InputComponent`, and each class must define a path to its template. All of the subclasses of `InputComponent` are
automatically added to a registry, which allows them to be easily referenced in other parts of the code.
"""
import datetime
import json
import os
import time
import warnings
from gradio.component import Component
import base64
import numpy as np
import PIL
import scipy.io.wavfile
from gradio import processing_utils, test_data
import pandas as pd
import math
import tempfile
class InputComponent(Component):
    """
    Input Component. All input components subclass this.
    """
    # Pure marker base class: concrete input components (Textbox, Slider, ...)
    # override the hooks inherited from Component (preprocess,
    # get_template_context, get_shortcut_implementations) as needed.
    pass
class Textbox(InputComponent):
    """
    Component creates a textbox for user to enter input. Provides a string (or number is `type` is "float") as an argument to the wrapped function.
    Input type: str
    """

    def __init__(self, lines=1, placeholder=None, default=None, numeric=False, type="str", label=None):
        """
        Parameters:
        lines (int): number of line rows to provide in textarea.
        placeholder (str): placeholder hint to provide behind textarea.
        default (str): default text to provide in textarea.
        numeric (bool): DEPRECATED. Whether the input should be parsed as a number instead of a string.
        type (str): Type of value to be returned by component. "str" returns a string, "number" returns a float value.
        label (str): component name in interface.
        """
        self.lines = lines
        self.placeholder = placeholder
        self.default = default
        if numeric:
            warnings.warn("The 'numeric' parameter has been deprecated. Set parameter 'type' to 'number' instead.", DeprecationWarning)
            self.type = "number"
        else:
            self.type = type
        if default is None:
            # Bug fix: look up the sample input by the *resolved* type
            # (self.type) rather than the raw 'type' argument, so the
            # deprecated numeric=True path gets a numeric test input
            # instead of a string one.
            self.test_input = {
                "str": "the quick brown fox jumped over the lazy dog",
                "number": 786.92,
            }[self.type]
        else:
            self.test_input = default
        super().__init__(label)

    def get_template_context(self):
        return {
            "lines": self.lines,
            "placeholder": self.placeholder,
            "default": self.default,
            **super().get_template_context()
        }

    @classmethod
    def get_shortcut_implementations(cls):
        return {
            "text": {},
            "textbox": {"lines": 7},
            "number": {"type": "number"}
        }

    def preprocess(self, x):
        # Convert the raw frontend string according to the configured type.
        if self.type == "str":
            return x
        elif self.type == "number":
            return float(x)
        else:
            raise ValueError("Unknown type: " + str(self.type) + ". Please choose from: 'str', 'number'.")
class Slider(InputComponent):
    """
    Component creates a slider that ranges from `minimum` to `maximum`. Provides a number as an argument to the wrapped function.
    Input type: float
    """

    def __init__(self, minimum=0, maximum=100, step=None, default=None, label=None):
        '''
        Parameters:
        minimum (float): minimum value for slider.
        maximum (float): maximum value for slider.
        step (float): increment between slider values.
        default (float): default value.
        label (str): component name in interface.
        '''
        self.minimum = minimum
        self.maximum = maximum
        if step is None:
            # Default step: roughly two orders of magnitude below the range.
            span = maximum - minimum
            step = 10 ** math.floor(math.log10(span) - 1)
        self.step = step
        self.default = minimum if default is None else default
        # Sample value used by automated interface tests.
        self.test_input = self.default
        super().__init__(label)

    def get_template_context(self):
        context = {
            "minimum": self.minimum,
            "maximum": self.maximum,
            "step": self.step,
            "default": self.default,
        }
        # Parent context wins on key collisions, as in the original merge order.
        context.update(super().get_template_context())
        return context

    @classmethod
    def get_shortcut_implementations(cls):
        return {"slider": {}}
class Checkbox(InputComponent):
    """
    Component creates a checkbox that can be set to `True` or `False`. Provides a boolean as an argument to the wrapped function.
    Input type: bool
    """

    def __init__(self, label=None):
        """
        Parameters:
        label (str): component name in interface.
        """
        # Fixed truthy sample input for automated interface tests.
        self.test_input = True
        super().__init__(label)

    @classmethod
    def get_shortcut_implementations(cls):
        return {"checkbox": {}}
class CheckboxGroup(InputComponent):
    """
    Component creates a set of checkboxes of which a subset can be selected. Provides a list of strings representing the selected choices as an argument to the wrapped function.
    Input type: Union[List[str], List[int]]
    """

    def __init__(self, choices, type="value", label=None):
        '''
        Parameters:
        choices (List[str]): list of options to select from.
        type (str): Type of value to be returned by component. "value" returns the list of strings of the choices selected, "index" returns the list of indicies of the choices selected.
        label (str): component name in interface.
        '''
        self.choices = choices
        self.type = type
        # Selecting every choice doubles as the automated test input.
        self.test_input = self.choices
        super().__init__(label)

    def get_template_context(self):
        context = {"choices": self.choices}
        context.update(super().get_template_context())
        return context

    def preprocess(self, x):
        # Translate the selected choice strings per the configured type.
        if self.type == "value":
            return x
        if self.type == "index":
            lookup = self.choices.index
            return [lookup(choice) for choice in x]
        raise ValueError("Unknown type: " + str(self.type) + ". Please choose from: 'value', 'index'.")
class Radio(InputComponent):
    """
    Component creates a set of radio buttons of which only one can be selected. Provides string representing selected choice as an argument to the wrapped function.
    Input type: Union[str, int]
    """

    def __init__(self, choices, type="value", label=None):
        '''
        Parameters:
        choices (List[str]): list of options to select from.
        type (str): Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
        label (str): component name in interface.
        '''
        self.choices = choices
        self.type = type
        # The first choice doubles as the automated test input.
        self.test_input = self.choices[0]
        super().__init__(label)

    def get_template_context(self):
        context = {"choices": self.choices}
        context.update(super().get_template_context())
        return context

    def preprocess(self, x):
        # Translate the selected choice per the configured type.
        if self.type == "value":
            return x
        if self.type == "index":
            return self.choices.index(x)
        raise ValueError("Unknown type: " + str(self.type) + ". Please choose from: 'value', 'index'.")
class Dropdown(InputComponent):
    """
    Component creates a dropdown of which only one can be selected. Provides string representing selected choice as an argument to the wrapped function.
    Input type: Union[str, int]
    """

    def __init__(self, choices, type="value", label=None):
        '''
        Parameters:
        choices (List[str]): list of options to select from.
        type (str): Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
        label (str): component name in interface.
        '''
        self.choices = choices
        self.type = type
        # The first choice doubles as the automated test input.
        self.test_input = self.choices[0]
        super().__init__(label)

    def get_template_context(self):
        context = {"choices": self.choices}
        context.update(super().get_template_context())
        return context

    def preprocess(self, x):
        # Translate the selected choice per the configured type.
        if self.type == "value":
            return x
        if self.type == "index":
            return self.choices.index(x)
        raise ValueError("Unknown type: " + str(self.type) + ". Please choose from: 'value', 'index'.")
class Image(InputComponent):
    """
    Component creates an image upload box with editing capabilities.
    Input type: Union[numpy.array, PIL.Image, str]
    """

    def __init__(self, shape=None, image_mode='RGB', invert_colors=False, source="upload", tool="editor", type="numpy", label=None):
        '''
        Parameters:
        shape (Tuple[int, int]): shape to crop and resize image to; if None, matches input image size.
        image_mode (str): "RGB" if color, or "L" if black and white.
        invert_colors (bool): whether to invert the image as a preprocessing step.
        source (str): Source of image. "upload" creates a box where user can drop an image file, "webcam" allows user to take snapshot from their webcam, "canvas" defaults to a white image that can be edited and drawn upon with tools.
        tool (str): Tools used for editing. "editor" allows a full screen editor, "select" provides a cropping and zoom tool.
        type (str): Type of value to be returned by component. "numpy" returns a numpy array with shape (width, height, 3) and values from 0 to 255, "pil" returns a PIL image object, "file" returns a temporary file object whose path can be retrieved by file_obj.name.
        label (str): component name in interface.
        '''
        self.shape = shape
        self.image_mode = image_mode
        self.source = source
        self.tool = tool
        self.type = type
        self.invert_colors = invert_colors
        self.test_input = test_data.BASE64_IMAGE
        super().__init__(label)

    @classmethod
    def get_shortcut_implementations(cls):
        return {
            "image": {},
            "webcam": {"source": "webcam"},
            "sketchpad": {"image_mode": "L", "source": "canvas", "shape": (28, 28), "invert_colors": True},
        }

    def get_template_context(self):
        return {
            "image_mode": self.image_mode,
            "source": self.source,
            "tool": self.tool,
            **super().get_template_context()
        }

    def preprocess(self, x):
        # Decode the base64 payload from the frontend, normalize mode/shape,
        # then convert to the requested return type.
        im = processing_utils.decode_base64_to_image(x)
        fmt = im.format
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            im = im.convert(self.image_mode)
        if self.shape is not None:
            im = processing_utils.resize_and_crop(
                im, (self.shape[0], self.shape[1]))
        if self.invert_colors:
            im = PIL.ImageOps.invert(im)
        if self.type == "pil":
            return im
        elif self.type == "numpy":
            return np.array(im)
        elif self.type == "file":
            file_obj = tempfile.NamedTemporaryFile(suffix="." + fmt)
            im.save(file_obj.name)
            return file_obj
        else:
            raise ValueError("Unknown type: " + str(self.type) + ". Please choose from: 'numpy', 'pil', 'file'.")

    def rebuild(self, dir, data):
        """
        Default rebuild method to decode a base64 image
        """
        im = processing_utils.decode_base64_to_image(data)
        timestamp = datetime.datetime.now()
        filename = f'input_{timestamp.strftime("%Y-%m-%d-%H-%M-%S")}.png'
        # Bug fix: save to the generated timestamped filename; the previous
        # code wrote to a constant literal path, so the returned filename
        # never matched the file actually written to disk.
        im.save(f'{dir}/{filename}', 'PNG')
        return filename
class Audio(InputComponent):
"""
Component accepts audio input files.
Input type: Union[Tuple[int, numpy.array], str, numpy.array]
"""
def __init__(self, source="upload", type="numpy", label=None):
"""
Parameters:
source (str): Source of audio. "upload" creates a box where user can drop an audio file, "microphone" creates a microphone input.
type (str): Type of value to be returned by component. "numpy" returns a 2-set tuple with an integer sample_rate and the data numpy.array of shape (samples, 2), "file" returns a | |
"""
인터넷에서 유용한 정보를 가져옵니다.
Class:
:obj:`~openpibo.collect.Wikipedia`
Functions:
:meth:`~openpibo.collect.Wikipedia.search`
Class:
:obj:`~openpibo.collect.Weather`
Functions:
:meth:`~openpibo.collect.Weather.search`
Class:
:obj:`~openpibo.collect.News`
Functions:
:meth:`~openpibo.collect.News.search`
**단어정보, 날씨 정보, 뉴스 정보** 를 가져올 수 있습니다.
"""
from urllib.parse import quote
from .modules.collect.get_soup import get_soup
class _Chapter:
    """One chapter of a Wikipedia article: a title, accumulated text
    content, and an optional parent chapter node."""

    def __init__(self, title):
        self._title = title
        self._content = ''
        self._parent = None

    def init_content(self):
        """Reset the accumulated content to the empty string."""
        self._content = ''

    def add_content(self, content):
        """Append ``content`` to the accumulated chapter text."""
        self._content += content

    def set_parent(self, parent_node):
        """Record ``parent_node`` as this chapter's parent."""
        self._parent = parent_node
class Wikipedia:
    """Search the Korean Wikipedia and split the article into chapters.

    example::

        from openpibo.collect import Wikipedia
        pibo_wiki = Wikipedia()
        # Use the code above before all examples below.
    """

    def __init__(self):
        self._summary = ''
        # Maps chapter number ('0', '1', '1.1', ...) -> _Chapter.
        # Chapter '0' always exists and holds the article summary ('개요').
        self._chapters = {'0': _Chapter('개요')}
        self._titles = []

    def __str__(self):
        """Return the content of the first chapter (the summary).

        example::

            print(pibo_wiki)
            >>> 강아지 (dog)는 개의 새끼를 일컫는다 ..."""
        return self._chapters['0']._content

    def search(self, search_text: str):
        """Search Wikipedia for ``search_text`` and cache the chapters.

        :param str search_text: the query string
        :returns: dict mapping chapter number to chapter, or ``None`` when
            nothing was found (in that case the summary chapter holds a
            Korean "no results" message).
        """
        self._chapters = {'0': _Chapter('개요')}
        self._titles = []
        encode_text = quote(search_text)
        url = f'https://ko.wikipedia.org/wiki/{encode_text}'
        soup = get_soup(url)
        total_content = soup.find('div', {'class': 'mw-parser-output'})
        if not total_content:
            self._chapters['0'].add_content(f"'{search_text}'에 대한 검색결과가 없습니다.")
            return
        # chapter_idx is a dotted string like '3.1'; chapter_list holds the
        # same numbers as ints, e.g. [3, 1].
        chapter_idx, chapter_list = '0', [0]
        parent_num = 0
        for content in total_content:
            tag = content.name
            if tag is None:
                # Bug fix: 'is None' instead of '== None' (plain text nodes
                # have no tag name and are skipped).
                continue
            elif tag[0] == 'h':
                # Heading: derive the chapter number from the heading level
                # (h2 -> depth 0, h3 -> depth 1, ...).
                # NOTE(review): parent_num is never updated inside the loop,
                # so every heading compares against depth 0 -- confirm this
                # matches the intended nesting behavior.
                new_parent_num = int(tag[1]) - 2
                if new_parent_num <= parent_num:
                    chapter_list = chapter_list[:new_parent_num + 1]
                    chapter_list[-1] += 1
                else:
                    chapter_list.append(1)
                chapter_idx = '.'.join(map(str, chapter_list))
                self._chapters[chapter_idx] = _Chapter(title=content.text.split('[')[0])
                self._titles.append(content.text)
            elif tag in ('p', 'ul'):
                # Body text and bullet lists accumulate into the current chapter.
                self._chapters[chapter_idx].add_content(content.text)
        return self._chapters

    def get_list(self):
        """Return the chapter numbers as a list of strings.

        example::

            pibo_wiki.get_list()
            # ['0', '1', '2', '3', '4', '5', '6']
        """
        return list(self._chapters.keys())

    def get(self, chapter_num):
        """Return the chapter ``chapter_num`` as a dict.

        :param str chapter_num: dotted chapter number such as '1.3.1'
            (a str, not an int or float).
        :returns: ``{'title': ..., 'content': ...}``; an empty chapter gets
            a Korean "no content" placeholder.
        """
        title = self._chapters[chapter_num]._title
        content = self._chapters[chapter_num]._content
        if not content:
            content = '(내용이 존재하지 않습니다.)'
        return {'title': title, 'content': content}
# Maps a Korean region name to the KMA (weather.go.kr) station id used in the
# short-term forecast URL; several neighbouring regions share one station id.
region_table = {
  '전국': 108,
  '서울': 109,
  '인천': 109,
  '경기': 109,
  '부산': 159,
  '울산': 159,
  '경남': 159,
  '대구': 143,
  '경북': 143,
  '광주': 156,
  '전남': 156,
  '전북': 146,
  '대전': 133,
  '세종': 133,
  '충남': 133,
  '충북': 131,
  '강원': 105,
  '제주': 184
}
class Weather:
    """Fetch today's, tomorrow's and the day-after-tomorrow's weather from the
    KMA short-term forecast page (weather.go.kr).

    example::

        from openpibo.collect import Weather
        pibo_weather = Weather()
        # Use the code above before all examples below.
    """

    def __init__(self):
        """Create an empty collector; call ``search()`` to populate it."""
        self._region = None        # last region searched
        self._forecast = ""        # overall ('종합') forecast text
        self._today = {}
        self._tomorrow = {}
        self._after_tomorrow = {}

    def __str__(self):
        """Return the overall forecast, or a Korean prompt to call search()."""
        if self._forecast:
            return self._forecast
        return "검색된 지역이 없습니다. '.search()' 를 하십시오."

    def search(self, region: str = '전국'):
        """Fetch and cache the forecast for ``region``.

        :param str region: one of the keys of ``region_table``
            (default: '전국')
        :returns: dict with 'today', 'tomorrow' and 'after_tomorrow' entries,
            each a dict with 'weather', 'minimum_temp' and 'highst_temp' keys
            (the 'highst_temp' spelling is kept for backward compatibility).
        :raises Exception: when ``region`` is not a known region name.
        """
        self._region = region
        self._forecast = ''
        self._today = {}
        self._tomorrow = {}
        self._after_tomorrow = {}
        try:
            region_num = region_table[region]
        except KeyError:
            # Bug fix: narrowed from a bare 'except:' so only an unknown
            # region name triggers the friendly message; KeyboardInterrupt
            # and unrelated errors are no longer swallowed.
            raise Exception(f"""'region'에 들어갈 수 있는 단어는 다음과 같습니다.\n\t{tuple(region_table.keys())}""")
        url = f'https://www.weather.go.kr/w/weather/forecast/short-term.do?stnId={region_num}'
        soup = get_soup(url)
        forecasts = soup.find('div', {'class': 'cmp-view-content'}).text
        # The page text is split into "□ ..." sections, each containing
        # "○ (date) description" entries.
        forecasts = forecasts.split('□')[1].split('○')
        for forecast in map(str.strip, forecasts):
            # Each entry begins with a parenthesised date tag, e.g. "(오늘)".
            split_point = forecast.index(')')
            date = forecast[:split_point + 1]
            desc = forecast[split_point + 2:]
            if '종합' in date:
                self._forecast = desc
            if '오늘' in date:
                self._today['weather'] = desc
            if '내일' in date or '~' in date:
                self._tomorrow['weather'] = desc
            if '모레' in date:
                self._after_tomorrow['weather'] = desc
        # First ten table cells: indices 2-4 are the three days' minimum
        # temperatures, 7-9 the maximums -- TODO confirm against page layout.
        temp_table = soup.find('tbody')
        all_temps = list(map(lambda x: x.text, temp_table.select('td')[:10]))
        self._today['minimum_temp'], self._tomorrow['minimum_temp'], self._after_tomorrow['minimum_temp'] = all_temps[2:5]
        self._today['highst_temp'], self._tomorrow['highst_temp'], self._after_tomorrow['highst_temp'] = all_temps[7:10]
        return {'today': self._today, 'tomorrow': self._tomorrow, 'after_tomorrow': self._after_tomorrow}

    def get_today(self):
        """Return the cached dict for today (weather/minimum_temp/highst_temp)."""
        return self._today

    def get_tomorrow(self):
        """Return the cached dict for tomorrow."""
        return self._tomorrow

    def get_after_tomorrow(self):
        """Return the cached dict for the day after tomorrow."""
        return self._after_tomorrow
# Maps a Korean news topic name to the JTBC RSS feed code used to build the
# feed URL (https://fs.jtbc.joins.com//RSS/<code>.xml).
topic_table = {
  '속보': 'newsflash',
  '정치': 'politics',
  '경제': 'economy',
  '사회': 'society',
  '국제': 'international',
  '문화': 'culture',
  '연예': 'entertainment',
  '스포츠': 'sports',
  '풀영상': 'fullvideo',
  '뉴스랭킹': 'newsrank',
  '뉴스룸': 'newsroom',
  '아침&': 'morningand',
  '썰전 라이브': 'ssulzunlive',
  '정치부회의': 'politicaldesk',
}
class News:
    """Fetch news articles via the JTBC news RSS service.

    example::

        from openpibo.collect import News
        pibo_news = News()
        # Use the code above before all examples below.
    """

    def __init__(self):
        """Create an empty collector. Valid ``search`` topics are the keys
        of ``topic_table``."""
        # Bug fix: the original used an f-string expression here; f-strings
        # are not docstrings -- they were evaluated and discarded on every
        # call. Replaced with a real docstring above.
        self._topic = ''
        self._articles = []
        self._titles = []

    def __str__(self):
        """Return the most recent article title, or a Korean prompt to
        call search()."""
        if self._titles:
            return self._titles[0]
        return "검색된 주제가 없습니다. '.search()' 를 하십시오."

    def search(self, topic='뉴스랭킹'):
        """Fetch articles for ``topic`` from the JTBC RSS feed and cache them.

        :param str topic: one of the keys of ``topic_table``
            (default: '뉴스랭킹')
        :returns: list of dicts with 'title', 'link', 'description' and
            'pubDate' keys.
        """
        self._topic = topic
        self._articles = []
        self._titles = []
        topic_code = topic_table[topic]
        url = f'https://fs.jtbc.joins.com//RSS/{topic_code}.xml'
        soup = get_soup(url, 'xml')
        for item in soup.findAll('item'):
            article = {
                'title': item.find('title').text,
                'link': item.find('link').text,
                'description': item.find('description').text,
                'pubDate': item.find('pubDate').text,
            }
            self._articles.append(article)
            self._titles.append(article['title'])
        return self._articles

    def get_titles(self):
        """Return a dict mapping article index (0-based int) to title.

        example::

            pibo_news.get_titles()
            # {0: '...', 1: '...', ...}
        """
        return dict(enumerate(self._titles))
def get_article(self, article_idx):
"""
기사 번호에 해당하는 기사 정보를 보여줍니다.
example::
pibo_news.get_article(1)
:param int article_idx: 기사 번호
기사 번호는 0~19 사이 int 타입 입니다.
:returns: title, link, description, pubDate 요소가 있는 dictionary 입니다.
example::
{
'title': '또 소방차 막은 불법주차, 이번엔 가차없이 밀어버렸다',
'link': 'https://news.jtbc.joins.com/article/article.aspx?...',
'description': '2019년 4월 소방당국의 불법주정차 강경대응 훈련 모습...,
'pubDate': '2021.09.03'
| |
<reponame>IcyW/SMIIP
#!/usr/bin/env python
# Copyright 2016 Johns Hopkins University (author: <NAME>)
# Apache 2.0.
from __future__ import print_function
import argparse
import sys, os
from collections import defaultdict
# Command-line interface; the per-phone duration stats themselves are read
# from stdin (as produced by analyze_alignments.sh).
parser = argparse.ArgumentParser(description="This script reads stats created in analyze_alignments.sh "
                                 "to print information about phone lengths in alignments. It's principally "
                                 "useful in order to see whether there is a reasonable amount of silence "
                                 "at the beginning and ends of segments. The normal output of this script "
                                 "is written to the standard output and is human readable (on crashes, "
                                 "we'll print an error to stderr.")
parser.add_argument("--frequency-cutoff-percentage", type = float,
                    default = 0.5, help="Cutoff, expressed as a percentage "
                    "(between 0 and 100), of frequency at which we print stats "
                    "for a phone.")
parser.add_argument("lang",
                    help="Language directory, e.g. data/lang.")
args = parser.parse_args()
# set up phone_int2text to map from integer phone-id to its printed form.
phone_int2text = {}
try:
    # 'with' closes the file even when a line fails to parse (the original
    # leaked the handle on the error path).
    with open(args.lang + "/phones.txt", "r") as f:
        for line in f.readlines():
            [word, number] = line.split()
            phone_int2text[int(number)] = word
except Exception:
    # Bug fix: narrowed from a bare 'except:' so KeyboardInterrupt and
    # SystemExit are not swallowed.
    sys.exit("analyze_phone_length_stats.py: error opening or reading {0}/phones.txt".format(
        args.lang))
# this is a special case... for begin- and end-of-sentence stats,
# we group all nonsilence phones together.
phone_int2text[0] = 'nonsilence'
try:
    # open lang/phones/silence.csl-- while there are many ways of obtaining the
    # silence/nonsilence phones, we read this because it's present in graph
    # directories as well as lang directories.
    filename = "{0}/phones/silence.csl".format(args.lang)
    f = open(filename, "r")
    line = f.readline()
    f.close()
    # silence.csl is a single colon-separated list of silence phone-ids;
    # whatever remains in 'nonsilence' afterwards is a true nonsilence
    # (or disambiguation) phone.
    for silence_phone in line.split(":"):
        nonsilence.remove(int(silence_phone))
except Exception as e:
    sys.exit("analyze_phone_length_stats.py: error processing {0}/phones/silence.csl: {1}".format(
        args.lang, str(e)))
# phone_lengths[boundary_type] for boundary_type in [ 'begin', 'end', 'all' ] is
# a dict from a 2-tuple (phone, length) to a count of occurrences, where phone is
# an integer phone-id, and length is the length of the phone instance in frames.
# note: for the 'begin' and 'end' boundary-types, we group all nonsilence phones
# into phone-id zero.
phone_lengths = dict()
for boundary_type in [ 'begin', 'end', 'all' ]:
    phone_lengths[boundary_type] = dict()
    # pre-populate one counter dict per known phone (defaultdict so that
    # unseen lengths silently start at zero).
    for p in phone_int2text.keys():
        phone_lengths[boundary_type][p] = defaultdict(int)
# total_phones is a dict from boundary_type to total count [of phone occurrences]
total_phones = defaultdict(int)
# total_frames is a dict from boundary_type to total number of frames.
total_frames = defaultdict(int)
# Accumulate counts from stdin; each line is: count boundary_type phone length
while True:
    line = sys.stdin.readline()
    if line == '':
        break
    a = line.split()
    if len(a) != 4:
        sys.exit("analyze_phone_length_stats.py: reading stdin, could not interpret line: " + line)
    try:
        # Bug fix: removed leftover debug statements (print(a), print('1'),
        # print('2')) that polluted the script's human-readable stdout.
        count, boundary_type, phone, length = a
        total_phones[boundary_type] += int(count)
        total_frames[boundary_type] += int(count) * int(length)
        phone_lengths[boundary_type][int(phone)][int(length)] += int(count)
        if int(phone) in nonsilence:
            # fold all nonsilence phones into pseudo-phone 0 as well.
            nonsilence_phone = 0
            phone_lengths[boundary_type][nonsilence_phone][int(length)] += int(count)
    except Exception as e:
        sys.exit("analyze_phone_length_stats.py: unexpected phone {0} "
                 "seen (lang directory mismatch?): {1}".format(phone, str(e)))
if len(phone_lengths) == 0:
    sys.exit("analyze_phone_length_stats.py: read no input")
# work out the optional-silence phone
try:
    # 'with' replaces the manual open/close so the handle is released even
    # if int() fails on a malformed file.
    with open(args.lang + "/phones/optional_silence.int", "r") as f:
        optional_silence_phone = int(f.readline())
    optional_silence_phone_text = phone_int2text[optional_silence_phone]
    if optional_silence_phone in nonsilence:
        print("analyze_phone_length_stats.py: was expecting the optional-silence phone to "
              "be a member of the silence phones, it is not. This script won't work correctly.")
except Exception:
    # Bug fix: narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
    # are not swallowed. Fallback: guess that the silence phone with the most
    # total frames is the optional silence.
    largest_count = 0
    optional_silence_phone = 1
    for p in phone_int2text.keys():
        if p > 0 and not p in nonsilence:
            this_count = sum([l * c for l, c in phone_lengths['all'][p].items()])
            if this_count > largest_count:
                largest_count = this_count
                optional_silence_phone = p
    optional_silence_phone_text = phone_int2text[optional_silence_phone]
    print("analyze_phone_length_stats.py: could not get optional-silence phone from "
          "{0}/phones/optional_silence.int, guessing that it's {1} from the stats. ".format(
              args.lang, optional_silence_phone_text))
# If length_to_count is a map from length-in-frames to count,
# return the length-in-frames that equals the (fraction * 100)'th
# percentile of the distribution.
def GetPercentile(length_to_count, fraction):
    """Return the smallest length whose cumulative count reaches
    fraction * total; 0 for an empty distribution."""
    total_phones = sum(length_to_count.values())
    if total_phones == 0:
        return 0
    else:
        items = sorted(length_to_count.items())
        count_cutoff = int(fraction * total_phones)
        cur_count_total = 0
        for length, count in items:
            assert count >= 0
            cur_count_total += count
            if cur_count_total >= count_cutoff:
                return length
        # Bug fix: was 'assert false', which is a NameError in Python;
        # the loop always returns because count_cutoff <= total_phones.
        assert False  # we shouldn't reach here.
def GetMean(length_to_count):
    """Return the mean length (in frames) of the distribution, 0.0 if empty."""
    num_phones = sum(length_to_count.values())
    if num_phones == 0:
        return 0.0
    frame_total = 0.0
    for length, count in length_to_count.items():
        frame_total += length * count
    return frame_total / num_phones
# Analyze frequency, median and mean of optional-silence at beginning and end of utterances.
# The next block will print something like
# "At utterance begin, SIL is seen 15.0% of the time; when seen, duration (median, mean) is (5, 7.6) frames."
# "At utterance end, SIL is seen 14.6% of the time; when seen, duration (median, mean) is (4, 6.1) frames."
# This block will print warnings if silence is seen less than 80% of the time at utterance
# beginning and end.
# Warn if the optional-silence phone is absent too often at utterance edges.
for boundary_type in 'begin', 'end':
    phone_to_lengths = phone_lengths[boundary_type]
    num_utterances = total_phones[boundary_type]
    assert num_utterances > 0
    opt_sil_lengths = phone_to_lengths[optional_silence_phone]
    # Percentage of utterances whose first/last phone is optional-silence.
    frequency_percentage = sum(opt_sil_lengths.values()) * 100.0 / num_utterances
    # The reason for this warning is that the tradition in speech recognition is
    # to supply a little silence at the beginning and end of utterances... up to
    # maybe half a second. If your database is not like this, you should know;
    # you may want to mess with the segmentation to add more silence.
    if frequency_percentage < 80.0:
        print("analyze_phone_length_stats.py: WARNING: optional-silence {0} is seen only {1}% "
              "of the time at utterance {2}. This may not be optimal.".format(
                  optional_silence_phone_text, frequency_percentage, boundary_type))
# this will control a sentence that we print..
boundary_to_text = { }
boundary_to_text['begin'] = 'At utterance begin'
boundary_to_text['end'] = 'At utterance end'
boundary_to_text['all'] = 'Overall'
# the next block prints lines like (to give some examples):
# At utterance begin, SIL accounts for 98.4% of phone occurrences, with duration (median, mean, 95-percentile) is (57,59.9,113) frames.
# ...
# At utterance end, nonsilence accounts for 4.2% of phone occurrences, with duration (median, mean, 95-percentile) is (13,13.3,22) frames.
# ...
# Overall, R_I accounts for 3.2% of phone occurrences, with duration (median, mean, 95-percentile) is (6,6.9,12) frames.
# Print one line per (boundary-type, phone) with its frequency and duration
# statistics, skipping phones below the --frequency-cutoff-percentage.
for boundary_type in 'begin', 'end', 'all':
    phone_to_lengths = phone_lengths[boundary_type]
    tot_num_phones = total_phones[boundary_type]
    # sort the phones in decreasing order of count.
    for phone,lengths in sorted(phone_to_lengths.items(), key = lambda x : -sum(x[1].values())):
        frequency_percentage = sum(lengths.values()) * 100.0 / tot_num_phones
        if frequency_percentage < args.frequency_cutoff_percentage:
            continue
        duration_median = GetPercentile(lengths, 0.5)
        duration_percentile_95 = GetPercentile(lengths, 0.95)
        duration_mean = GetMean(lengths)
        text = boundary_to_text[boundary_type] # e.g. 'At utterance begin'.
        try:
            phone_text = phone_int2text[phone]
        # NOTE(review): bare except; a KeyError is the expected failure here.
        except:
            sys.exit("analyze_phone_length_stats.py: phone {0} is not covered on phones.txt "
                     "(lang/alignment mismatch?)".format(phone))
        print("{text}, {phone_text} accounts for {percent}% of phone occurrences, with "
              "duration (median, mean, 95-percentile) is ({median},{mean},{percentile95}) frames.".format(
                  text = text, phone_text = phone_text,
                  percent = "%.1f" % frequency_percentage,
                  median = duration_median, mean = "%.1f" % duration_mean,
                  percentile95 = duration_percentile_95))
## Print stats on frequency and average length of word-internal optional-silences.
## For optional-silence only, subtract the begin and end-utterance stats from the 'all'
## stats, to get the stats excluding initial and final phones.
total_frames['internal'] = total_frames['all'] - total_frames['begin'] - total_frames['end']
total_phones['internal'] = total_phones['all'] - total_phones['begin'] - total_phones['end']
# Copy so the subtraction below does not mutate phone_lengths['all'].
internal_opt_sil_phone_lengths = dict(phone_lengths['all'][optional_silence_phone])
for length in internal_opt_sil_phone_lengths.keys():
    # subtract the counts for begin and end from the overall counts to get the
    # word-internal count.
    internal_opt_sil_phone_lengths[length] -= (phone_lengths['begin'][optional_silence_phone][length] +
                                               phone_lengths['end'][optional_silence_phone][length])
if total_phones['internal'] != 0.0:
    total_internal_optsil_frames = sum([ float(l * c) for l,c in internal_opt_sil_phone_lengths.items() ])
    total_optsil_frames = sum([ float(l * c)
                                for l,c in phone_lengths['all'][optional_silence_phone].items() ])
    opt_sil_internal_frame_percent = total_internal_optsil_frames * 100.0 / total_frames['internal']
    opt_sil_total_frame_percent = total_optsil_frames * 100.0 / total_frames['all']
    internal_frame_percent = total_frames['internal'] * 100.0 / total_frames['all']
    print("The optional-silence phone {0} occupies {1}% of frames overall ".format(
        optional_silence_phone_text, "%.1f" % opt_sil_total_frame_percent))
    # 360000 frames = 1 hour at 100 frames/second.
    hours_total = total_frames['all'] / 360000.0;
    hours_nonsil = (total_frames['all'] - total_optsil_frames) / 360000.0
    print("Limiting the stats to the {0}% of frames not covered by an utterance-[begin/end] phone, "
          "optional-silence {1} occupies {2}% of frames.".format("%.1f" % internal_frame_percent,
                                                                 optional_silence_phone_text,
                                                                 "%.1f" % opt_sil_internal_frame_percent))
    print("Assuming 100 frames per second, the alignments represent {0} hours of data, "
          "or {1} hours if {2} frames are excluded.".format(
              "%.1f" % hours_total, "%.1f" % hours_nonsil, optional_silence_phone_text))
    opt_sil_internal_phone_percent = (sum(internal_opt_sil_phone_lengths.values()) *
                                      100.0 / total_phones['internal'])
    duration_median = GetPercentile(internal_opt_sil_phone_lengths, 0.5)
    duration_mean = GetMean(internal_opt_sil_phone_lengths)
    duration_percentile_95 = GetPercentile(internal_opt_sil_phone_lengths, 0.95)
print("Utterance-internal optional-silences {0} comprise {1}% of utterance-internal phones, with duration "
"(median, mean, 95-percentile) = ({2},{3},{4})".format(
optional_silence_phone_text, "%.1f" % | |
size = random.sample(range(0, 4), 3)
size = sorted(size)
if j == 5 :
SIZE = [[], [], []]
while len(SIZE[2]) == 0:
SIZE = []
size0 = random.randint(2, 4)
size1 = random.randint(2, 4)
L1 = random.sample(range(0, 4), size0)
L2 = random.sample(range(0, 4), size1)
L3 = []
L = L1 + L2
for l in L:
if L.count(l) == 1:
L3.append(l)
while sorted(list(set(L1).intersection(set(L2)))) == sorted(L3) or sorted(
list(set(L1).union(set(L2)))) == sorted(L3):
size0 = random.randint(2, 4)
size1 = random.randint(2, 4)
# size0=4
# size1=4
L1 = random.sample(range(0, 4), size0)
L2 = random.sample(range(0, 4), size1)
L3 = []
L = L1 + L2
for l in L:
if L.count(l) == 1:
L3.append(l)
SIZE.append(L1)
SIZE.append(L2)
SIZE.append(L3)
if j == 6 :
SIZE = [[], [], []]
while len(SIZE[2]) == 0:
SIZE = []
size0 = random.randint(2, 4)
size1 = random.randint(2, 4)
L1 = random.sample(range(0, 4), size0)
L2 = random.sample(range(0, 4), size1)
L = L1 + L2
L3 = set(L)
L3 = list(L3)
while sorted(list(set(L1).intersection(set(L2)))) == sorted(L3) or sorted(
list(set(list(set(L1).union(set(L2)))).difference(
set(list(set(L1).intersection(set(L2))))))) == sorted(
L3):
size0 = random.randint(2, 4)
size1 = random.randint(2, 4)
L1 = random.sample(range(0, 4), size0)
L2 = random.sample(range(0, 4), size1)
L = L1 + L2
L3 = set(L)
L3 = list(L3)
SIZE.append(L1)
SIZE.append(L2)
SIZE.append(L3)
if j == 7:
SIZE = [[], [], []]
while len(SIZE[2]) == 0:
SIZE = []
size0 = random.randint(2, 4)
size1 = random.randint(2, 4)
L1 = random.sample(range(0, 4), size0)
L2 = random.sample(range(0, 4), size1)
L3 = []
L = L1 + L2
for l in L:
if L.count(l) == 2:
L3.append(l)
L3 = set(L3)
L3 = list(L3)
while sorted(list(set(L1).union(set(L2)))) == sorted(L3) or \
sorted(list(
set(list(set(L1).union(set(L2)))).difference(set(list(set(L1).intersection(set(L2))))))) \
== sorted(L3):
size0 = random.randint(2, 4)
size1 = random.randint(2, 4)
L1 = random.sample(range(0, 4), size0)
L2 = random.sample(range(0, 4), size1)
L3 = []
L = L1 + L2
for l in L:
if L.count(l) == 2:
L3.append(l)
L3 = set(L3)
L3 = list(L3)
SIZE.append(L1)
SIZE.append(L2)
SIZE.append(L3)
# img_huidu = read_image(path='./huidu/')
count = 0
if mode==1:
picture = random.randint(0, len(img_huidu)//9*8 - 1)
if mode==2:
picture = random.randint(len(img_huidu) // 9 * 8, len(img_huidu) - 1)
ba = random.randint(0, len(img_back) - 1)
backpic = img_back[ba]
for l in list0:
b=0
e=0
for el in l:
plt.subplot(3, 3, el)
img = img_huidu[picture]
if type==0:
img = color(img,colo[count])
if type==1 or type==2 or type==3:
d=COLO[count][b]
img = color(img, d)
if b >= len(COLO[count]) - 1:
b = random.randint(0, len(COLO[count]) - 1)
else:
b = b + 1
if j==4:
img = resize(img, size[count])
elif j==5 or j==6 or j==7:
m= SIZE[count][e]
img = resize(img, m)
if e >= len(SIZE[count]) - 1:
e = random.randint(0, len(SIZE[count]) - 1)
else:
e = e + 1
plt.imshow(img)
plt.axis('off')
if count == 0:
name0 = str(j + type * second_rule_num + all_num) + '-' + str(count) + '.jpg'
path_name = os.path.join(path, name0)
plt.savefig(path_name)
# plt.show()
plt.clf() # 清图
if back == True:
blend_two_images(image=path_name, background=backpic)
name0 = name0 + ';'
elif count == 1:
name1 = str(j + type * second_rule_num + all_num) + '-' + str(count) + '.jpg'
path_name = os.path.join(path, name1)
plt.savefig(path_name)
# plt.show()
plt.clf() # 清图
if back == True:
blend_two_images(image=path_name, background=backpic)
name1 = name1 + ';'
elif count == 2:
name2 = str(j + type * second_rule_num + all_num) + '-' + str(count) + '.jpg'
path_name = os.path.join(path, name2)
plt.savefig(path_name)
# plt.show()
plt.clf() # 清图
if back == True:
blend_two_images(image=path_name, background=backpic)
name2 = name2 + ';'
count = count + 1
structure_str =[str0[type],str1[j]]
data_simple = {'img_name': name0 + name1 + name2, 'Text': '', 'Out': structure_str}
print(j + type * second_rule_num + all_num)
json_data_list[str(j + type * second_rule_num + all_num)] = data_simple
return json_data_list
def size_to(path,json_data_list,mode=1,back=True):
all_num = len(json_data_list)
first_rule_num = 4
second_rule_num = 4
for type in range (first_rule_num):
for j in range (second_rule_num):
str0 = ['size-progression', 'size-xor', 'size-or', 'size-and', ]
str1 = [ 'number-progression', 'position-xor', 'position-or', 'position-and']
list0 = []
if type== 0 :
size = random.sample(range(0, 4), 3)
size = sorted(size)
if type == 1 :
SIZE = [[], [], []]
while len(SIZE[2]) == 0:
SIZE = []
size0 = random.randint(2, 4)
size1 = random.randint(2, 4)
L1 = random.sample(range(0, 4), size0)
L2 = random.sample(range(0, 4), size1)
L3 = []
L = L1 + L2
for l in L:
if L.count(l) == 1:
L3.append(l)
while sorted(list(set(L1).intersection(set(L2)))) == sorted(L3) or sorted(
list(set(L1).union(set(L2)))) == sorted(L3):
size0 = random.randint(2, 4)
size1 = random.randint(2, 4)
# size0=4
# size1=4
L1 = random.sample(range(0, 4), size0)
L2 = random.sample(range(0, 4), size1)
L3 = []
L = L1 + L2
for l in L:
if L.count(l) == 1:
L3.append(l)
SIZE.append(L1)
SIZE.append(L2)
SIZE.append(L3)
if type == 2 :
SIZE = [[], [], []]
while len(SIZE[2]) == 0:
SIZE = []
size0 = random.randint(2, 4)
size1 = random.randint(2, 4)
L1 = random.sample(range(0, 4), size0)
L2 = random.sample(range(0, 4), size1)
L = L1 + L2
L3 = set(L)
L3 = list(L3)
while sorted(list(set(L1).intersection(set(L2)))) == sorted(L3) or sorted(
list(set(list(set(L1).union(set(L2)))).difference(
set(list(set(L1).intersection(set(L2))))))) == sorted(
L3):
size0 = random.randint(2, 4)
size1 = random.randint(2, 4)
L1 = random.sample(range(0, 4), size0)
L2 = random.sample(range(0, 4), size1)
L = L1 + L2
L3 = set(L)
L3 = list(L3)
SIZE.append(L1)
SIZE.append(L2)
SIZE.append(L3)
if type == 3 :
SIZE = [[], [], []]
while len(SIZE[2]) == 0:
SIZE = []
size0 = random.randint(2, 4)
size1 = random.randint(2, 4)
L1 = random.sample(range(0, 4), size0)
L2 = random.sample(range(0, 4), size1)
L3 = []
L = L1 + L2
for l in L:
if L.count(l) == 2:
L3.append(l)
L3 = set(L3)
L3 = list(L3)
while sorted(list(set(L1).union(set(L2)))) == sorted(L3) or \
sorted(list(
set(list(set(L1).union(set(L2)))).difference(set(list(set(L1).intersection(set(L2))))))) \
== sorted(L3):
size0 = random.randint(2, 4)
size1 = random.randint(2, 4)
L1 = random.sample(range(0, 4), size0)
L2 = random.sample(range(0, 4), size1)
L3 = []
L = L1 + L2
for l in L:
if L.count(l) == 2:
L3.append(l)
L3 = set(L3)
L3 = list(L3)
SIZE.append(L1)
SIZE.append(L2)
SIZE.append(L3)
if j == 0:
list0 = []
L = []
L3 = []
L4 = []
while L == sorted(L3) or sorted(L4) == sorted(L3):
a = random.sample(range(4, 10), 3)
a = sorted(a)
L1 = random.sample(range(1, 10), a[0])
L2 = random.sample(range(1, 10), a[1])
L = L1 + L2
L4 = []
for l in L:
if L.count(l) == 1:
L4.append(l)
L = set(L)
L = list(L)
L = sorted(L)
L3 = random.sample(range(1, 10), a[2])
list0.append(L1)
list0.append(L2)
list0.append(L3)
if j == 1:
list0 = [[], [], []]
while len(list0[2]) == 0:
list0 = []
L1 = []
L2 = []
L3 = []
L4 = []
L5 = []
while len(L3)<4 or sorted(L5) == sorted(L3) or sorted(L4) == sorted(L3) or (
len(L3) > len(L2) and len(L2) > len(L1)):
a = random.randint(4, 9)
b = random.randint(4, 9)
L1 = random.sample(range(1, 10), a)
L2 = random.sample(range(1, 10), b)
L3 = []
L5 = []
L = L1 + L2
L4 = set(L1 + L2)
L4 = list(L4)
for l in L:
if L.count(l) == 1:
L3.append(l)
if L.count(l) == 2:
L5.append(l)
L5 = list(set(L5))
list0.append(L1)
list0.append(L2)
list0.append(L3)
if j == 2:
list0 = [[], [], []]
while len(list0[2]) == 0:
list0 = []
L1 = []
L2 = []
L3 = []
L4 = []
L5 = []
while len(L3)<4 or sorted(L5) == sorted(L3) or sorted(L4) == sorted(L3) or (
len(L3) > len(L2) and len(L2) > len(L1)):
a = random.randint(4, 9)
b = random.randint(4, 9)
L1 = random.sample(range(1, 10), a)
L2 = random.sample(range(1, 10), b)
L = L1 + L2
L3 = set(L)
L3 = list(L3)
L4 = []
L5 = []
for l in L:
if L.count(l) == 1:
L4.append(l)
if L.count(l) == 2:
L5.append(l)
L5 = list(set(L5))
list0.append(L1)
list0.append(L2)
list0.append(L3)
if j == 3:
list0 = [[], [], []]
while len(list0[2]) == 0:
list0 = []
L1 = []
L2 = []
L3 = []
L4 = []
L5 = []
while len(L3)<4 or sorted(L5) == sorted(L3) or sorted(L4) == sorted(L3) or (len(L3) > len(L2) | |
# -*- coding: utf-8 -*-
# from __future__ import unicode_literals # NO DESCOMENTAR! ROMPE TODO!
from django.db import models, connection, connections
from django.contrib.auth.models import User
from django.conf import settings
# import mapscript
from layerimport.models import TablaGeografica, ArchivoRaster
from layers.models import Capa, Categoria, Metadatos, Atributo, ArchivoSLD, Escala
from layers.models import RasterDataSource, VectorDataSource, CONST_VECTOR, CONST_RASTER
import os
# slugs
from django.utils.text import slugify
# signals
from django.db.models.signals import post_save, post_delete, pre_save
from django.dispatch import receiver
from django.contrib.gis.geos import MultiPoint
# fts
from djorm_pgfulltext.models import SearchManager
from djorm_pgfulltext.fields import VectorField
# misc
from utils.commons import normalizar_texto, urlToFile, coordConvert, take
import urlparse
import urllib
import urllib2
import time
from lxml import etree
from subprocess import call
from utils import mapserver
from mapcache import mapcache
from .tasks import add_tileset, rm_tileset, add_or_replace_tileset
import json
from sequences.models import Sequence
from utils.db import drop_table
import pytz
# Default spatial reference system for rendered maps (EPSG:3857, Web Mercator).
MAPA_DEFAULT_SRS = 3857
# Default (width, height) in pixels for generated map images.
MAPA_DEFAULT_SIZE = (110, 150)
MAPA_DEFAULT_IMAGECOLOR = '#C6E2F2' # must be in hex format
# Choices for Mapa.tipo_de_mapa.
TIPO_DE_MAPA_ENUM = (
    ('', ''),
    ('layer', 'layer'), # single-layer map
    ('layer_original_srs', 'layer_original_srs'), # layer map in its original SRS
    ('user', 'user'), # per-user map
    ('public_layers', 'public_layers'), # map of all public layers in the system
    ('general', 'general'), # any other ad-hoc map
    ('layer_raster_band', 'layer_raster_band') # map derived from a raster band
)
def RepresentsPositiveInt(s):
    """Return True if s can be parsed as a strictly positive integer.

    Accepts anything int() accepts (e.g. '42', u'7', 7); returns False for
    non-numeric strings and, unlike the original, also for None and other
    unconvertible values instead of raising.
    """
    try:
        return int(s) > 0
    except (ValueError, TypeError):
        # Bug fix: int(None) raises TypeError, which the original did not
        # catch; treat it as "not a positive int" rather than crashing.
        return False
class TMSBaseLayer(models.Model):
    """A TMS/XYZ tile base layer that maps can reference as their background."""
    nombre = models.CharField('Nombre', null=False, blank=False, unique=True, max_length=255)
    # Tile URL template of the remote tile service.
    url = models.CharField('URL', null=False, blank=False, max_length=2000)
    min_zoom = models.IntegerField('Min zoom', null=True, blank=True)
    max_zoom = models.IntegerField('Max zoom', null=True, blank=True)
    # NOTE(review): presumably toggles TMS vs XYZ y-axis convention -- confirm with consumers.
    tms = models.BooleanField(u'TMS?', null=False, default=True)
    fuente = models.CharField('Fuente', null=False, blank=True, max_length=255) # attribution
    descripcion = models.TextField(u'Descripción', null=False, blank=True, max_length=10000)
    class Meta:
        verbose_name = 'TMS Base Layer'
        verbose_name_plural = 'TMS Base Layers'
    def __unicode__(self):
        # Python 2 display name.
        return unicode(self.nombre)
class ManejadorDeMapas:
    """Class-level helpers that (re)generate mapfiles, thumbnails and legends
    for Mapa objects.  All methods log to stdout and swallow most errors
    (best-effort semantics)."""
    @classmethod
    def delete_mapfile(cls, id_mapa):
        """Delete the .map file for id_mapa from MAPAS_PATH (best-effort)."""
        print "...ManejadorDeMapas.delete_mapfile %s"%(id_mapa)
        try:
            mapa=Mapa.objects.get(id_mapa=id_mapa)
            # if instance.tipo_de_mapa == 'layer':
            #     manage.remove([instance.id_mapa])
        except:
            print "......error: mapa inexistente"
            return
        try:
            os.remove(os.path.join(settings.MAPAS_PATH, id_mapa+'.map'))
        except:
            # Missing file is fine; the goal is just that it's gone.
            pass
        # #if mapa.tipo_de_mapa in ['layer_original_srs', 'general']:
        # try:
        #     os.remove(os.path.join(settings.MEDIA_ROOT, id_mapa+'.png'))
        # except:
        #     pass
    @classmethod
    def commit_mapfile(cls,id_mapa):
        """Ensure the .map file for id_mapa exists, regenerating it if needed.

        Returns the mapfile's full path, or '' when the map does not exist
        (or has an unhandled tipo_de_mapa)."""
        print "ManejadorDeMapas.commit_mapfile: %s" %(id_mapa)
        mapfile_full_path=os.path.join(settings.MAPAS_PATH, id_mapa+'.map')
        if not os.path.isfile(mapfile_full_path):
            if id_mapa=='mapground_public_layers':
                cls.regenerar_mapa_publico()
            else:
                try:
                    mapa=Mapa.objects.get(id_mapa=id_mapa)
                except:
                    print "....ManejadorDeMapas.commit_mapfile: ERROR: mapa inexistente %s" %(mapfile_full_path)
                    return ''
                # Mapa.save() regenerates the mapfile as a side effect.
                if mapa.tipo_de_mapa=='user':
                    cls.regenerar_mapas_de_usuarios([mapa.owner])
                elif mapa.tipo_de_mapa=='public_layers':
                    cls.regenerar_mapa_publico()
                elif mapa.tipo_de_mapa=='layer_raster_band':
                    mapa.save()
                elif mapa.tipo_de_mapa in ['layer', 'layer_original_srs']:
                    mapa.save()
                elif mapa.tipo_de_mapa in ['general']: # to be evaluated...
                    mapa.save()
                else:
                    return ''
        return mapfile_full_path
    @classmethod
    def regenerar_mapas_de_usuarios(cls,lista_users_inicial=None):
        """Rebuild the per-user maps (all users, or only those in the list):
        the map's layer set is recreated from the user's permitted layers."""
        from users.models import ManejadorDePermisos
        print "...ManejadorDeMapas.regenerar_mapas_de_usuarios %s"%(str(lista_users_inicial))
        q = Mapa.objects.filter(tipo_de_mapa='user')
        if lista_users_inicial is not None:
            q = q.filter(owner__in=lista_users_inicial)
        for m in q:
            m.mapserverlayer_set.all().delete()
            lista_capas=ManejadorDePermisos.capas_de_usuario(m.owner, 'all').order_by('metadatos__titulo')
            for idx, c in enumerate(lista_capas):
                MapServerLayer(mapa=m,capa=c,orden_de_capa=idx).save()
            m.save()
    @classmethod
    def regenerar_mapa_publico(cls):
        """Rebuild the singleton 'public layers' map from every public Capa."""
        print "...ManejadorDeMapas.regenerar_mapa_publico"
        m, created = Mapa.objects.get_or_create(owner=User.objects.get(username='mapground'),nombre='mapground_public_layers',id_mapa='mapground_public_layers', tipo_de_mapa='public_layers')
        m.mapserverlayer_set.all().delete()
        for idx, c in enumerate(Capa.objects.filter(wxs_publico=True)):
            MapServerLayer(mapa=m,capa=c,orden_de_capa=idx).save()
        m.save()
    @classmethod
    def generar_thumbnail(cls,id_mapa):
        """Generate the map's thumbnail; returns '' on any failure."""
        print "...ManejadorDeMapas.generar_thumbnail"
        try:
            mapa=Mapa.objects.get(id_mapa=id_mapa)
            thumb = mapa.generar_thumbnail()
            return thumb
        except:
            return ''
    @classmethod
    def generar_legend(cls,id_mapa):
        """Generate the map's legend image; returns False on any failure."""
        print "...ManejadorDeMapas.generar_legend"
        try:
            mapa=Mapa.objects.get(id_mapa=id_mapa)
            return mapa.generar_legend()
        except:
            return False
    @classmethod
    def existe_mapa(cls, id_mapa):
        """Return True if a Mapa with the given id_mapa exists."""
        try:
            mapa=Mapa.objects.get(id_mapa=id_mapa)
            return True
        except:
            return False
# NOTE: could be split into separate Mapa / MapServerMap classes
class Mapa(models.Model):
    """A MapServer map: owns an ordered set of layers (through MapServerLayer)
    plus the rendering parameters and metadata used to build its mapfile."""
    owner = models.ForeignKey(User, null=False,blank=False)
    nombre = models.CharField('Nombre', null=False, blank=False, max_length=255)
    # Globally unique id; for user-created maps it is '<owner>_<nombre>'.
    id_mapa = models.CharField('Id mapa', null=False, blank=False, unique=True, max_length=255)
    slug = models.SlugField('Slug', null=False, blank=True, max_length=255)
    # map metadata
    titulo = models.CharField(u'Título', null=False, blank=True, max_length=255) # title
    fuente = models.TextField(u'Fuente', null=False, blank=True, max_length=1000) # attribution
    contacto = models.TextField(u'Contacto', null=False, blank=True, max_length=1000) # contact organization
    descripcion = models.TextField(u'Descripción', null=False, blank=True, max_length=10000) # abstract
    # dates? (TODO)
    srs = models.CharField('SRS', null=False, blank=True, max_length=255)
    tipo_de_mapa = models.CharField('Tipo de Mapa', choices=TIPO_DE_MAPA_ENUM, max_length=30, null=False, blank=True, default='')
    tms_base_layer = models.ForeignKey(TMSBaseLayer, verbose_name='Capa Base', null=True, blank=True, on_delete=models.SET_NULL)
    capas = models.ManyToManyField(Capa, blank=True, through='MapServerLayer')
    # Free-form mapserver parameters, parsed leniently by the dame_* helpers.
    size = models.CharField('Size', null=False, blank=True, max_length=100)
    extent = models.CharField('Extent', null=False, blank=True, max_length=100)
    imagecolor = models.CharField('Imagecolor', null=False, blank=True, max_length=100)
    imagetype = models.CharField('Imagetype', null=False, blank=True, max_length=100) #TODO
    # keep adding mapserver tags here (TODO)
    publico = models.BooleanField(u'Público?', null=False, default=False)
    categorias = models.ManyToManyField(Categoria, blank=True, verbose_name=u'Categorías')
    escala = models.ForeignKey(Escala, null=True, blank=True, on_delete=models.SET_NULL)
    palabras_claves = models.TextField(u'Palabras Claves', null=False, blank=True, max_length=10000,default='')
    texto_output = models.TextField(u'Texto Output', null=False, blank=True, max_length=10000)
    timestamp_alta = models.DateTimeField(auto_now_add=True, verbose_name='Fecha de alta')
    timestamp_modificacion = models.DateTimeField(auto_now=True, verbose_name='Fecha de última modificación')
    # Plain-text source for the postgres full-text search vector below.
    input_search_index = models.TextField(null=False, blank=True, default='')
    search_index = VectorField()
    class Meta:
        verbose_name = 'Mapa'
        verbose_name_plural = 'Mapas'
    def __unicode__(self):
        # Python 2 display name used in the admin and shell.
        return unicode(self.nombre)
def actualizar_input_search_index(self):
if self.tipo_de_mapa=='general':
textos = []
textos.append(normalizar_texto(self.titulo))
textos.append(normalizar_texto(self.palabras_claves))
textos.append(normalizar_texto(self.escala))
textos.append(normalizar_texto(self.descripcion))
self.input_search_index = ' '.join(textos)
    def save(self, *args, **kwargs):
        """Persist the map and regenerate its derived artifacts (mapfile,
        thumbnail, legend, mapcache tileset).

        Accepts the extra kwarg escribir_imagen_y_mapfile (default True) to
        skip the artifact regeneration.  Always returns True (unusual for a
        Django save(); the base class returns None).
        """
        escribir_imagen_y_mapfile = kwargs.pop('escribir_imagen_y_mapfile', True)
        self.slug=slugify(unicode(self.nombre)).replace('-', '_')
        # this covers the 'general' maps that are created from the form title;
        # the other cases fill these fields in when the objects are created
        if self.nombre == '':
            self.nombre=unicode(normalizar_texto(self.titulo))
        if self.id_mapa == '':
            self.id_mapa = '%s_%s'%(self.owner.username,self.nombre)
        try:
            # Cache a (truncated) text dump of the generated mapfile.
            msm=self.create_mapfile(False)
            self.texto_output=msm.convertToString()[0:9999]
        # NOTE(review): bare except hides any mapfile-generation error.
        except:
            self.texto_output=''
        if self.tipo_de_mapa=='general':
            self.actualizar_input_search_index()
        super(Mapa, self).save(*args, **kwargs)
        if escribir_imagen_y_mapfile:
            self.create_mapfile(True)
            self.generar_thumbnail_y_legend()
            if self.tipo_de_mapa in ('layer', 'general'):# , 'layer_raster_band'):
                self.agregar_a_mapcache()
        return True
@property
def dame_titulo(self):
if self.titulo != '':
return self.titulo
if self.tipo_de_mapa in ('layer_original_srs', 'layer'):
try:
return self.capas.first().dame_titulo
except:
pass
elif self.tipo_de_mapa in ('layer_raster_band'):
try:
try:
msl = self.mapserverlayer_set.first()
banda = ' - ' + msl.capa.gdal_metadata['variables_detectadas'][msl.banda]['elemento']
except:
banda = ''
return self.capas.first().dame_titulo + banda
except:
pass
return ''
@property
def dame_descripcion(self):
if self.descripcion != '':
return self.descripcion
if self.tipo_de_mapa in ('layer_original_srs', 'layer'):
try:
return self.capas.first().dame_descripcion
except:
pass
return ''
@property
def dame_fuente(self):
if self.fuente!='':
return self.fuente
if self.tipo_de_mapa in ('layer_original_srs', 'layer'):
try:
return self.capas.first().dame_fuente
except:
pass
return ''
@property
def dame_contacto(self):
if self.contacto!='':
return self.contacto
if self.tipo_de_mapa in ('layer_original_srs', 'layer'):
try:
return self.capas.first().dame_contacto
except:
pass
return ''
    @property
    def dame_tilesurl(self):
        """Mapcache TMS URL template ({z}/{x}/{y}) for this map, '' if N/A.

        The ?t= query arg is a cache-buster derived from the last-modified
        timestamp (of the layer for layer maps, of the map itself otherwise).
        """
        if self.tipo_de_mapa in ('layer_original_srs', 'layer'):
            try:
                c = self.capas.first()
                return settings.MAPCACHE_URL+'tms/1.0.0/%s@GoogleMapsCompatible/{z}/{x}/{y}.png?t=%s'%(c.id_capa, time.mktime(c.timestamp_modificacion.timetuple()))
            except:
                pass
        elif self.tipo_de_mapa in ('general', 'layer_raster_band'):
            return settings.MAPCACHE_URL+'tms/1.0.0/%s@GoogleMapsCompatible/{z}/{x}/{y}.png?t=%s'%(self.id_mapa, time.mktime(self.timestamp_modificacion.timetuple()))
        return ''
# devuelve un string parametrizable tipo '-71.55 -41.966667 -63.0 -37.9'
def dame_extent(self, separator=' ', srid=4326):
# TODO: creo que en el caso de layer_raster_band habria que sacar el extent de los metadatos de la banda, ya que *quizas* cada uno podria tener uno distinto
if self.tipo_de_mapa in ('layer_original_srs', 'layer', 'layer_raster_band'):
try:
c = self.capas.first()
return c.dame_extent(separator, srid)
except:
pass
elif self.tipo_de_mapa in ('general'):
try:
extents=[]
for c in self.capas.all():
extents+=c.dame_extent([], srid)
mp=MultiPoint(extents)
return separator.join(map(str, mp.extent))
except:
pass
return ''
@property
def dame_filename(self):
if '_' in self.id_mapa:
res = self.id_mapa.split('_',1)[1]
else:
res = self.id_mapa
return res.encode('utf-8')
def dame_mapserver_size(self):
try:
if self.size!='':
if self.size.count(',')==1:
return map(lambda x: int(x),self.size.split(','))
else:
return map(lambda x: int(x),self.size.split())
return MAPA_DEFAULT_SIZE
except:
return MAPA_DEFAULT_SIZE
    # @property
    # returns a 4-tuple of floats to apply to the mapObj
    def dame_mapserver_extent(self, srid=4326):
        """Parse self.extent ('x1,y1,x2,y2' or whitespace-separated) and
        convert it from EPSG:4326 to the given srid.

        Returns [minx, miny, maxx, maxy], or None when self.extent is empty
        (implicit fall-through) or on any parse/conversion error.
        """
        try:
            if self.extent!='':
                if self.extent.count(',')==3:
                    ext = map(lambda x: float(x), self.extent.split(','))
                else:
                    ext = map(lambda x: float(x), self.extent.split()) # ugly, but allows more than one space between values
                minxy = coordConvert(ext[0], ext[1], 4326, srid)
                maxxy = coordConvert(ext[2], ext[3], 4326, srid)
                return [minxy.x, minxy.y, maxxy.x, maxxy.y]
        except:
            return None
@property
def dame_imagecolor(self):
try:
if self.imagecolor!='':
if self.imagecolor[0]=='#':
return True, self.imagecolor
else:
if self.imagecolor.count(',')==2:
return False, map(lambda x: int(x), self.imagecolor.split(','))
elif self.imagecolor.count(' ')==2:
return False, map(lambda x: int(x), self.imagecolor.split())
return True, MAPA_DEFAULT_IMAGECOLOR
except:
return True, MAPA_DEFAULT_IMAGECOLOR
@property
def dame_projection(self):
return unicode(self.srs) if self.srs!='' and RepresentsPositiveInt(self.srs) else str(MAPA_DEFAULT_SRS)
    @property
    def dame_wxs_url(self):
        """WMS/WFS endpoint URL for this map, depending on tipo_de_mapa.

        Layer-type maps get a public or private endpoint according to the
        layer's wxs_publico flag.  NOTE(review): there is no final return, so
        e.g. 'general' maps yield None rather than '' -- confirm callers cope.
        """
        if self.tipo_de_mapa=='public_layers':
            return urlparse.urljoin(settings.SITE_URL,'/layers/public_wxs/')
        elif self.tipo_de_mapa=='user':
            return urlparse.urljoin(settings.SITE_URL, '/users/'+self.owner.username+'/wxs/')
        elif self.tipo_de_mapa=='layer_original_srs':
            return urlparse.urljoin(settings.SITE_URL,'/layers/wxs/'+unicode(self.id_mapa.replace('_layer_srs',''))+'/')
        elif self.tipo_de_mapa=='layer_raster_band':
            c=self.capas.first()
            if c.wxs_publico:
                return urlparse.urljoin(settings.SITE_URL, '/layers/public_wxs_raster_band/' + unicode(self.id_mapa) + '/')
            else:
                return urlparse.urljoin(settings.SITE_URL, '/layers/wxs_raster_band/' + unicode(self.id_mapa) + '/')
        elif self.tipo_de_mapa=='layer':
            c=self.capas.first()
            if c.wxs_publico:
                return urlparse.urljoin(settings.SITE_URL,'/layers/public_wxs/'+unicode(self.id_mapa)+'/')
            return urlparse.urljoin(settings.SITE_URL,'/layers/wxs/'+unicode(self.id_mapa)+'/')
@property
def showAsWMSLayer(self):
c=self.capas.first()
if self.tipo_de_mapa == 'layer' or self.tipo_de_mapa == 'layer_raster_band':
if c is not None:
return c.mostrarComoWMS()
return False
def dame_mapserver_map_def(self):
es_hexa, color = self.dame_imagecolor
srid = self.srs if self.tipo_de_mapa == 'layer_original_srs' else self.dame_projection
bbox = self.dame_extent(',', srid)
mapExtent = self.extent if self.tipo_de_mapa == 'layer_original_srs' else self.dame_mapserver_extent(int(srid))
wxs_url = self.dame_wxs_url
layers = []
c=self.capas.first()
enableContextInfo = True
if self.tipo_de_mapa in ('layer', 'layer_raster_band'):
enableContextInfo = c.tipo_de_capa != CONST_RASTER
if self.tipo_de_mapa in ('layer', 'layer_original_srs', 'user', 'general', 'layer_raster_band'):
mapserverlayers = self.mapserverlayer_set.all().order_by('orden_de_capa','capa__metadatos__titulo')
elif self.tipo_de_mapa == 'public_layers':
mapserverlayers = self.mapserverlayer_set.filter(capa__wxs_publico=True).order_by('orden_de_capa')
for msl in mapserverlayers:
if self.tipo_de_mapa=='general':
l=msl.dame_mapserver_layer_def('WMS')
else:
l=msl.dame_mapserver_layer_def(msl.dame_layer_connection_type())
l['metadata']['ows_srs'] = 'epsg:%s epsg:4326 epsg:3857'%(srid) if RepresentsPositiveInt(srid) else 'epsg:4326 epsg:3857'
layers.append(l)
data = {
"idMapa": self.id_mapa,
"imageColor": {
"type": "hex" if es_hexa else "rgb",
"color": color
},
"srid": srid,
"srs": 'epsg:%s'%(srid) if self.srs=='' or RepresentsPositiveInt(self.srs) else self.srs,
"mapFullExtent": mapExtent,
"mapBoundingBox": map(lambda x: | |
# train_tempobert.py
#!/usr/bin/env python
"""
Training script for temporal BERT model using temporal attention.
Based on https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_mlm.py
"""
import math
from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
from typing import Optional
import datasets
from dotenv import load_dotenv
from loguru import logger
import data_utils
import hf_utils
from models.tempobert.modeling_tempobert import TempoBertForMaskedLM
from temporal_data_collator import DataCollatorForTimePrependedLanguageModeling
from temporal_text_dataset import TemporalText
from transformers import DataCollatorForLanguageModeling, Trainer, TrainingArguments
from transformers.hf_argparser import HfArgumentParser
from transformers.models.auto.configuration_auto import CONFIG_MAPPING
from transformers.models.auto.tokenization_auto import TOKENIZER_MAPPING
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": "The model checkpoint for weights initialization."
            "Don't set if you want to train a model from scratch."
        },
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": "Override some existing default config settings when a model is trained from scratch. Example: "
            "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
        },
    )
    config_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Pretrained config name or path if not the same as model_name"
        },
    )
    tokenizer_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Pretrained tokenizer name or path if not the same as model_name"
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={
            "help": "Where do you want to store the pretrained models downloaded from huggingface.co"
        },
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={
            "help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."
        },
    )
    # NOTE(review): annotated Optional[str] but defaults to False (a bool);
    # downstream code presumably only tests truthiness -- confirm before
    # changing the default to None.
    freeze_layers: Optional[str] = field(
        default=False,
        metadata={
            "help": "True to freeze all encoder layers, or a string specifying the layer numbers to freeze."
        },
    )
    hidden_size: Optional[int] = field(
        default=768,
        metadata={"help": "Dimensionality of the encoder layers and the pooler layer."},
    )
    num_hidden_layers: Optional[int] = field(
        default=12,
        metadata={"help": "Number of hidden layers in the Transformer encoder."},
    )
    tokenizer: Optional[str] = field(
        default='bert-base',
        metadata={
            "help": "Tokenizer name without case, e.g., `bert-base`. Use `cased_tokenizer` to specify the case."
        },
    )
    gradient_checkpointing: Optional[bool] = field(
        default=False,
        metadata={
            "help": "If True, use gradient checkpointing to save memory at the expense of slower backward pass."
        },
    )
    time_embedding_type: Optional[str] = field(
        default="temporal_attention",
        metadata={
            "help": "Time embedding type. Possible values: `prepend_token`, `temporal_attention`."
        },
    )

    def __post_init__(self):
        # config_overrides only makes sense when building a config from
        # scratch; reject conflicting sources of configuration early.
        if self.config_overrides is not None and (
            self.config_name is not None or self.model_name_or_path is not None
        ):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    # NOTE(review): because this defaults to a non-None loader script, the
    # "need either a dataset name or a path" check in __post_init__ only
    # fires when dataset_name is explicitly set to None -- confirm intended.
    dataset_name: Optional[str] = field(
        default="temporal_text_dataset.py",
        metadata={"help": "The name of the dataset to use (via the datasets library)."},
    )
    dataset_config_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "The configuration name of the dataset to use (via the datasets library)."
        },
    )
    train_path: Optional[str] = field(
        default=None, metadata={"help": "The input training data file or directory."}
    )
    validation_path: Optional[str] = field(
        default=None,
        metadata={
            "help": "An optional input evaluation data file or directory to evaluate the perplexity on."
        },
    )
    overwrite_cache: bool = field(
        default=False,
        metadata={"help": "Overwrite the cached training and evaluation sets"},
    )
    # If None, load_data() falls back to tokenizer.model_max_length (capped at 1024).
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated."
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15,
        metadata={"help": "Ratio of tokens to mask for masked language modeling loss"},
    )
    time_mlm_probability: Optional[float] = field(
        default=None,
        metadata={
            "help": "Ratio of time tokens to mask for masked language modeling loss (relevant in case of a time-prepended model). "
            "If None, time tokens are occasionally masked, like any other token."
        },
    )
    # Selects between tokenize_dataset_line_by_line and tokenize_dataset_concat.
    line_by_line: bool = field(
        default=False,
        metadata={
            "help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": "Whether to pad all samples to `max_seq_length`. "
            "If False, will pad the samples dynamically when batching to the maximum length in the batch."
        },
    )
    # Either a comma-separated list ("2010,2015") or a dash range ("2010-2015");
    # parsed in train_tempobert().
    times: Optional[str] = field(
        default=None, metadata={"help": "List of time points for the model to use."}
    )
    words_for_vocab_file: Optional[str] = field(
        default=None,
        metadata={"help": "Text file containing words to add to the model vocabulary."},
    )
    corpus_name: Optional[str] = field(
        default=None,
        metadata={"help": "The name of the corpus (e.g., liverpool, semeval_eng)."},
    )
    def __post_init__(self):
        # At least one data source must be provided.
        if (
            self.dataset_name is None
            and self.train_path is None
            and self.validation_path is None
        ):
            raise ValueError(
                "Need either a dataset name or a training/validation path."
            )
def freeze_model_layers(model, freeze_layers_arg):
    """Freeze encoder layers of `model` in place.

    :param model: A model exposing ``model.base_model.encoder.layer``.
    :param freeze_layers_arg: Falsy to freeze nothing; ``True`` to freeze
        every encoder layer; or a comma-separated string of layer indices
        (e.g. ``"0,1,2"``) to freeze only those layers.
    """
    if not freeze_layers_arg:
        return
    layers = model.base_model.encoder.layer
    if isinstance(freeze_layers_arg, bool):
        frozen_layers = list(layers)
    elif isinstance(freeze_layers_arg, str):
        # Skip empty tokens so trailing/duplicated commas ("0,1,") don't
        # crash int(); int() itself tolerates surrounding whitespace.
        layer_indexes = [
            int(tok) for tok in freeze_layers_arg.split(",") if tok.strip()
        ]
        frozen_layers = [layers[idx] for idx in layer_indexes]
    else:
        # Unsupported truthy type: freeze nothing (matches previous behavior).
        frozen_layers = []
    for layer in frozen_layers:
        for param in layer.parameters():
            param.requires_grad = False
def tokenize_dataset_line_by_line(
    dataset,
    data_args,
    training_args,
    tokenizer,
    text_column_name,
    column_names,
    max_seq_length,
    return_special_tokens_mask,
):
    """Tokenize each nonempty line as its own sequence.

    Returns the tokenized dataset; the original columns are removed.
    The text column and the `time` column are filtered together so each
    remaining text keeps its matching time value.
    """
    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Drop empty / whitespace-only lines. The time value of a dropped
        # line is dropped with it; filtering only the text list (as before)
        # misaligned every remaining (text, time) pair after a blank line.
        pairs = [
            (text, time)
            for text, time in zip(examples[text_column_name], examples['time'])
            if text and not text.isspace()
        ]
        texts = [text for text, _ in pairs]
        times = [time for _, time in pairs]
        return tokenizer(
            texts,
            times,
            padding=padding,
            truncation=True,
            max_length=max_seq_length,
            return_special_tokens_mask=return_special_tokens_mask,
        )

    with training_args.main_process_first(desc="dataset map tokenization"):
        return dataset.map(
            tokenize_function,
            batched=True,
            num_proc=data_args.preprocessing_num_workers,
            remove_columns=column_names,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset line_by_line",
        )
def tokenize_dataset_concat(
    dataset,
    data_args,
    training_args,
    tokenizer,
    text_column_name,
    column_names,
    max_seq_length,
    return_special_tokens_mask,
):
    """Tokenize every text, then concatenate them together before splitting them in smaller parts."""
    # Local import: chain is only needed here and the module header is not
    # guaranteed to import itertools.
    from itertools import chain

    def tokenize_function(examples):
        return tokenizer(
            examples[text_column_name],
            examples['time'],
            return_special_tokens_mask=return_special_tokens_mask,
        )

    with training_args.main_process_first(desc="dataset map tokenization"):
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            num_proc=data_args.preprocessing_num_workers,
            remove_columns=column_names,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on every text in dataset",
        )

    # Concatenate all texts from our dataset and generate chunks of max_seq_length
    def group_texts(examples):
        # chain.from_iterable is O(total); the previous sum(lists, []) was
        # quadratic in the number of examples per batch.
        concatenated_examples = {
            k: list(chain.from_iterable(examples[k])) for k in examples.keys()
        }
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder; we could add padding instead if the
        # model supported it -- customize this part to your needs.
        if total_length >= max_seq_length:
            total_length = (total_length // max_seq_length) * max_seq_length
        # Split by chunks of max_len
        return {
            k: [
                t[i : i + max_seq_length]
                for i in range(0, total_length, max_seq_length)
            ]
            for k, t in concatenated_examples.items()
        }

    # Note that with `batched=True`, this map processes 1,000 texts together,
    # so group_texts throws away a remainder for each group of 1,000 texts.
    # You can adjust that batch_size here but a higher value might be slower
    # to preprocess. See:
    # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
    with training_args.main_process_first(desc="grouping texts together"):
        return tokenized_datasets.map(
            group_texts,
            batched=True,
            num_proc=data_args.preprocessing_num_workers,
            load_from_cache_file=not data_args.overwrite_cache,
            desc=f"Grouping texts in chunks of {max_seq_length}",
        )
def load_data(
    corpus_path,
    data_args,
    training_args,
    model_args,
    tokenizer,
):
    """Load the .txt corpus at `corpus_path` and return it tokenized.

    Chooses line-by-line or concat-and-chunk tokenization based on
    `data_args.line_by_line`.
    """
    dataset_files = data_utils.iterdir(corpus_path, suffix=".txt", to_str=True)
    logger.info("Loading dataset files...")
    dataset = datasets.load_dataset(
        data_args.dataset_name,
        data_files=dataset_files,
        split="train",  # Note the split is always labeled "train"
        cache_dir=model_args.cache_dir,
    )
    logger.info(f"Loaded dataset of {dataset.num_rows:,} rows. Preprocessing...")
    column_names = dataset.column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    # Resolve the effective sequence length from the CLI arg / tokenizer limit.
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
                "Picking 1024 instead."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # DataCollatorForLanguageModeling is more efficient when it receives the `special_tokens_mask`.
    return_special_tokens_mask = True
    tokenize = (
        tokenize_dataset_line_by_line
        if data_args.line_by_line
        else tokenize_dataset_concat
    )
    return tokenize(
        dataset,
        data_args,
        training_args,
        tokenizer,
        text_column_name,
        column_names,
        max_seq_length,
        return_special_tokens_mask,
    )
def train_tempobert():
"""Main training function for temporal BERT"""
parser = HfArgumentParser(
(ModelArguments, DataTrainingArguments, TrainingArguments)
)
model_args, data_args, training_args, last_checkpoint = hf_utils.init_run(parser)
if (training_args.do_eval and not data_args.validation_path) or (
not training_args.do_eval and data_args.validation_path
):
logger.error(f"{training_args.do_eval=} but {data_args.validation_path=}")
exit()
dataset_files = data_utils.iterdir(data_args.train_path, suffix=".txt")
if data_args.times:
if ',' in data_args.times:
times = data_args.times.split(',')
elif '-' in data_args.times:
from_time, to_time = data_args.times.split('-')
times = list(map(str, range(from_time, to_time + 1)))
else:
times = [data_args.times]
else:
times = sorted([TemporalText.find_time(f.name) for f in dataset_files])
logger.info(f'Loaded {len(times)} time points from {data_args.train_path}.')
# Set the model and data collator | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
import subprocess
from pymatgen.io.vasp.sets import DictSet
#!!! The warning that is raised here is because there is no YAML! It can be ignored
class StaticEnergyCalc(DictSet):
    """VASP input set for a single-point (static) energy calculation.

    Writes INCAR/KPOINTS/POTCAR settings tuned for a static run whose
    charge-density outputs (CHGCAR/AECCAR*) feed Bader analysis.
    """

    CONFIG = {
        "INCAR": {
            "EDIFF": 1.0e-07,
            "EDIFFG": -1e-04,
            "ENCUT": 520,
            "ISIF": 3,  # !!! do I want this..?
            # Gaussian smearing = 0 if system unknown. #!!! read docs!
            # (a duplicate "ISMEAR": 0 entry later in this literal was
            # removed; in a dict literal the later key silently wins)
            "ISMEAR": 0,
            "LCHARG": True,  # write CHGCAR
            "LAECHG": True,  # write AECCAR0, AECCAR1, and AECCAR2
            "LWAVE": True,  # write WAVECAR
            "NSW": 0,  # single energy calc
            "PREC": "Accurate",  # !!! USE Accurate WHEN NOT DOING BADELF
            "IVDW": 12,  # van der waals correction
            "SIGMA": 0.060,
            # 'NBANDS': 643, # Calculate more bands than normal (extra empty)
            "SYMPREC": 1e-8,  # !!! CUSTODIAN FIX - dont use unless needed
            # 'ISYM': 0,
            # 'NGX': 100,
            # 'NGY': 100,
            # 'NGZ': 100,
            "NGXF": 100,
            "NGYF": 100,
            "NGZF": 100,
            "NCORE": 8,
            #!!! TESTING
            # ELFCAR (optional)
            # 'LELF': True, # write ELFCAR
            # 'NPAR': 1, # Must be set if LELF is set to True
        },
        "KPOINTS": {"reciprocal_density": 100},
        "POTCAR_FUNCTIONAL": "PBE_54",
        "POTCAR": {
            "Ac": "Ac",
            "Ag": "Ag",
            "Al": "Al",
            "Ar": "Ar",
            "As": "As",
            "Au": "Au",
            "B": "B",
            "Ba": "Ba_sv",
            "Be": "Be_sv",
            "Bi": "Bi",
            "Br": "Br",
            "C": "C",
            "Ca": "Ca_sv",
            "Cd": "Cd",
            "Ce": "Ce",
            "Cl": "Cl",
            "Co": "Co",
            "Cr": "Cr_pv",
            "Cs": "Cs_sv",
            "Cu": "Cu_pv",
            "Dy": "Dy_3",
            "Er": "Er_3",
            "Eu": "Eu",
            "F": "F",
            "Fe": "Fe_pv",
            "Ga": "Ga_d",
            "Gd": "Gd",
            "Ge": "Ge_d",
            "H": "H",
            "He": "He",
            "Hf": "Hf_pv",
            "Hg": "Hg",
            "Ho": "Ho_3",
            "I": "I",
            "In": "In_d",
            "Ir": "Ir",
            "K": "K_sv",
            "Kr": "Kr",
            "La": "La",
            "Li": "Li_sv",
            "Lu": "Lu_3",
            "Mg": "Mg_pv",
            "Mn": "Mn_pv",
            "Mo": "Mo_pv",
            "N": "N",
            "Na": "Na_pv",
            "Nb": "Nb_pv",
            "Nd": "Nd_3",
            "Ne": "Ne",
            "Ni": "Ni_pv",
            "Np": "Np",
            "O": "O",
            "Os": "Os_pv",
            "P": "P",
            "Pa": "Pa",
            "Pb": "Pb_d",
            "Pd": "Pd",
            "Pm": "Pm_3",
            "Pr": "Pr_3",
            "Pt": "Pt",
            "Pu": "Pu",
            "Rb": "Rb_sv",
            "Re": "Re_pv",
            "Rh": "Rh_pv",
            "Ru": "Ru_pv",
            "S": "S",
            "Sb": "Sb",
            "Sc": "Sc_sv",
            "Se": "Se",
            "Si": "Si",
            "Sm": "Sm_3",
            "Sn": "Sn_d",
            "Sr": "Sr_sv",
            "Ta": "Ta_pv",
            "Tb": "Tb_3",
            "Tc": "Tc_pv",
            "Te": "Te",
            "Th": "Th",
            "Ti": "Ti_pv",
            "Tl": "Tl_d",
            "Tm": "Tm_3",
            "U": "U",
            "V": "V_pv",
            "W": "W_pv",
            "Xe": "Xe",
            "Y": "Y_sv",
            "Yb": "Yb_2",
            "Zn": "Zn",
            "Zr": "Zr_sv",
        },
    }

    def __init__(self, structure, **kwargs):
        """
        :param structure: Structure
        :param kwargs: Same as those supported by DictSet.
        """
        super().__init__(structure, StaticEnergyCalc.CONFIG, **kwargs)
        self.kwargs = kwargs
class NonSCFCalc(DictSet):
    """VASP input set for the non-self-consistent (DOS) follow-up run.

    Reads the WAVECAR/CHGCAR from the preceding static calculation
    (ISTART=1, ICHARG=11) and evaluates a dense DOS on a finer k-mesh.
    """

    CONFIG = {
        "INCAR": {
            "EDIFF": 1.0e-07,
            "EDIFFG": -1e-04,
            "ENCUT": 520,
            "ISIF": 3,  # !!! do I want this..?
            # Tetrahedron smearing ##### <<<<<<<<< Changed from the first calc.
            # The original literal had "ISMEAR": 0 here and "ISMEAR": -5
            # further down; the later value silently won, so -5 was what
            # actually ran. The dead 0 entry has been removed.
            "ISMEAR": -5,
            "LCHARG": True,  # write CHGCAR
            "LAECHG": True,  # write AECCAR0, AECCAR1, and AECCAR2
            # "LWAVE": True,
            "NSW": 0,  # single energy calc
            "PREC": "Accurate",  # !!! USE Accurate WHEN NOT DOING BADELF
            "IVDW": 12,  # van der waals correction
            # "SIGMA": 0.060, ##### <<<<<<<<< Changed from the first calc
            # 'NBANDS': 643, # Calculate more bands than normal (extra empty)
            "SYMPREC": 1e-8,  #!!! CUSTODIAN FIX - dont use unless needed
            "ISYM": 2,
            "NGXF": 100,
            "NGYF": 100,
            "NGZF": 100,
            "NCORE": 8,
            #!!! TESTING
            # ELFCAR (optional)
            # 'LELF': True, # write ELFCAR
            # 'NPAR': 1, # Must be set if LELF is set to True
            # PDOS
            "ISTART": 1,  # continuation file -- read the WAVECAR
            "ICHARG": 11,
            "LORBIT": 1,  #!!! use 11 if you don't want to set radii and 1 if you do
            # 'RWIGS': '', #!!! SET BELOW AND SET RADII IN SAME ORDER AS POSCAR
            "NEDOS": 2001,  # number of grid states for evaluating DOS
            # NBANDS, EMIN/EMAX are some others parameters that I can consider
        },
        "KPOINTS": {
            "reciprocal_density": 300
        },  ##### <<<<<<<<< Changed from the first calc
        "POTCAR_FUNCTIONAL": "PBE_54",
        "POTCAR": {
            "Ac": "Ac",
            "Ag": "Ag",
            "Al": "Al",
            "Ar": "Ar",
            "As": "As",
            "Au": "Au",
            "B": "B",
            "Ba": "Ba_sv",
            "Be": "Be_sv",
            "Bi": "Bi",
            "Br": "Br",
            "C": "C",
            "Ca": "Ca_sv",
            "Cd": "Cd",
            "Ce": "Ce",
            "Cl": "Cl",
            "Co": "Co",
            "Cr": "Cr_pv",
            "Cs": "Cs_sv",
            "Cu": "Cu_pv",
            "Dy": "Dy_3",
            "Er": "Er_3",
            "Eu": "Eu",
            "F": "F",
            "Fe": "Fe_pv",
            "Ga": "Ga_d",
            "Gd": "Gd",
            "Ge": "Ge_d",
            "H": "H",
            "He": "He",
            "Hf": "Hf_pv",
            "Hg": "Hg",
            "Ho": "Ho_3",
            "I": "I",
            "In": "In_d",
            "Ir": "Ir",
            "K": "K_sv",
            "Kr": "Kr",
            "La": "La",
            "Li": "Li_sv",
            "Lu": "Lu_3",
            "Mg": "Mg_pv",
            "Mn": "Mn_pv",
            "Mo": "Mo_pv",
            "N": "N",
            "Na": "Na_pv",
            "Nb": "Nb_pv",
            "Nd": "Nd_3",
            "Ne": "Ne",
            "Ni": "Ni_pv",
            "Np": "Np",
            "O": "O",
            "Os": "Os_pv",
            "P": "P",
            "Pa": "Pa",
            "Pb": "Pb_d",
            "Pd": "Pd",
            "Pm": "Pm_3",
            "Pr": "Pr_3",
            "Pt": "Pt",
            "Pu": "Pu",
            "Rb": "Rb_sv",
            "Re": "Re_pv",
            "Rh": "Rh_pv",
            "Ru": "Ru_pv",
            "S": "S",
            "Sb": "Sb",
            "Sc": "Sc_sv",
            "Se": "Se",
            "Si": "Si",
            "Sm": "Sm_3",
            "Sn": "Sn_d",
            "Sr": "Sr_sv",
            "Ta": "Ta_pv",
            "Tb": "Tb_3",
            "Tc": "Tc_pv",
            "Te": "Te",
            "Th": "Th",
            "Ti": "Ti_pv",
            "Tl": "Tl_d",
            "Tm": "Tm_3",
            "U": "U",
            "V": "V_pv",
            "W": "W_pv",
            "Xe": "Xe",
            "Y": "Y_sv",
            "Yb": "Yb_2",
            "Zn": "Zn",
            "Zr": "Zr_sv",
        },
    }

    def __init__(self, structure, **kwargs):
        """
        :param structure: Structure
        :param kwargs: Same as those supported by DictSet.
        """
        super().__init__(structure, NonSCFCalc.CONFIG, **kwargs)
        self.kwargs = kwargs
import pandas
def parse_ACF(filename="ACF.dat"):
    """Parse a Bader ``ACF.dat`` output file.

    :param filename: path to the ACF.dat file (default "ACF.dat")
    :returns: ``(dataframe, extra_data)`` -- one row per site with columns
        x/y/z/charge/min_dist/atomic_vol, plus a dict of the footer totals
        (vacuum_charge, vacuum_volume, nelectrons).
    """
    # Read all lines; `with` guarantees the handle is closed.
    with open(filename) as acf_file:
        lines = acf_file.readlines()
    # Headers for the columns we keep; the '#' (site index) column is dropped.
    headers = ("x", "y", "z", "charge", "min_dist", "atomic_vol")
    bader_data = []
    # The first 2 lines are the header and the final 4 lines are the footer.
    # This is always true, so the per-site data sits in between.
    for line in lines[2:-4]:
        # split() the row and drop the leading site-index column ([1:]).
        # float() instead of eval(): eval would execute arbitrary file
        # content, and every kept column is numeric anyway.
        line_data = [float(value) for value in line.split()[1:]]
        bader_data.append(line_data)
    dataframe = pandas.DataFrame(
        data=bader_data,
        columns=headers,
    )
    # The footer carries one float at the end of each line (split()[-1]).
    extra_data = {
        "vacuum_charge": float(lines[-3].split()[-1]),
        "vacuum_volume": float(lines[-2].split()[-1]),
        "nelectrons": float(lines[-1].split()[-1]),
    }
    return dataframe, extra_data
# -----------------------------------------------------------------------------
# if this is the starting point...
# Build a sanitized primitive cell from the CIF and write the static-run inputs.
from pymatgen.core.structure import Structure
structure = Structure.from_file("Y2C.cif")
structure = structure.get_primitive_structure()
structure = structure.copy(sanitize=True)
calc = StaticEnergyCalc(structure)
# save the calc files (INCAR/KPOINTS/POSCAR/POTCAR into the current directory)
calc.write_input(".")
# -----------------------------------------------------------------------------
# RUN STATIC ENERGY CALC
print("Running vasp...")
# run vasp (blocks until the MPI job finishes; stdout goes to vasp.out)
subprocess.run(
    "module load vasp; mpirun -np 144 vasp_std > vasp.out",
    shell=True,
)
# -----------------------------------------------------------------------------
# RUN BADER ANALYSIS
# Keep a copy of the first run's CHG, sum the core+valence densities, then
# run bader against the summed reference charge.
subprocess.run("cp CHG CHG_step1", shell=True)
subprocess.run("./chgsum.pl AECCAR0 AECCAR2 > addingcharges.out", shell=True)
subprocess.run("./bader CHG -ref CHGCAR_sum > bader.out", shell=True)
# subprocess.run('./bader CHG -ref ELFCAR > bader.out', shell=True)
# After bader is ran, we want to look at the data
dataframe, extra_data = parse_ACF(filename="ACF.dat")
# Update the NonSCFCalc settings with RWIGS
# ...based off min-dist
# NonSCFCalc.CONFIG["INCAR"].update({"RWIGS": str(dataframe.min_dist.values)[1:-1]})
# ...based off total volume
# import math
# import numpy
# radii_from_vol = numpy.array([((3*volume)/(4*math.pi))**(1/3) for volume in dataframe.atomic_vol.values])
# NonSCFCalc.CONFIG["INCAR"].update({"RWIGS": str(radii_from_vol)[1:-1]})
# NOTE(review): RWIGS is hard-coded here for this specific structure instead
# of being derived from the bader output above -- confirm these radii match
# the POSCAR site order.
NonSCFCalc.CONFIG["INCAR"].update({"RWIGS": "1.584 1.699 1.552"})
# -----------------------------------------------------------------------------
# Re-read the relaxed geometry and write the non-SCF (DOS) run inputs.
structure = Structure.from_file("CONTCAR")
calc = NonSCFCalc(structure)
# save the calc files
calc.write_input(".")
# -----------------------------------------------------------------------------
#######
# Now run this calculation
# And then move the results back to the same directory
#!!! If you want an empty atom, you must manually add this text at the bottom of the POSCAR
# Empty (spheres)
# 1
# 0.000000 0.000000 0.000000
#!!! I also need to set LORBIT to 1 instead of 11
#!!! I need to set the RWIGS tag too so that it knows what radius the sphere has
#######
# Now run this calculation
print("Running vasp...")
subprocess.run(
    "module load vasp; mpirun -np 144 vasp_std > vasp.out",
    shell=True,
)
# /nas/longleaf/apps-dogwood/vasp/5.4.4/bin/vasp_std
# -----------------------------------------------------------------------------
# from pymatgen.io.vasp.outputs import Vasprun
# xmlReader = Vasprun(filename= "vasprun.xml",
# parse_dos = True,
# parse_eigen = True,
# parse_projected_eigen = True, #!!! **Note that this can take an extreme amount of time and memory.** So use this | |
<gh_stars>0
"""
s3peat - Fast uploading directories to S3
.. rubric:: Example usage
.. code-block:: python
from s3peat import S3Bucket, sync_to_s3
# Create a S3Bucket instance, which is used to create connections to S3
bucket = S3Bucket('my-bucket', AWS_KEY, AWS_SECRET)
# Call the sync_to_s3 method
    failures = sync_to_s3(directory='my/directory', prefix='my/key',
                          bucket=bucket, concurrency=50)
# A list of filenames will be returned if there were failures in uploading
if not failures:
print "No failures"
else:
print "Failed:", failures
"""
import os
import sys
import time
import signal
import logging
from threading import Thread
import boto
__version__ = '0.3.1'
class S3Bucket(object):
    """
    Create new connections to an S3 bucket.
    This object mostly exists to make the :class:`S3Queue` API cleaner, but it
    can be reused anywhere.
    :param name: S3 bucket name to upload to
    :param key: AWS key
    :param secret: AWS secret
    :param public: Whether uploads should be public (default: ``True``)
    :type name: str
    :type key: str
    :type secret: str
    :type public: bool
    """
    def __init__(self, name, key, secret, public=True):
        self.name = name
        self.key = key
        self.secret = secret
        # Checked by S3Queue._upload to pick the ACL for each key.
        self.public = public
    def get_new(self):
        """
        Return a new :class:`boto.s3.bucket.Bucket` with its own connection.
        """
        conn = boto.connect_s3(self.key, self.secret)
        try:
            return conn.get_bucket(self.name)
        # NOTE(review): NoAuthHandlerFound is normally raised by connect_s3(),
        # not get_bucket(), so this handler may never fire here -- confirm.
        # (Python 2 print syntax below; this module targets Python 2.)
        except boto.exception.NoAuthHandlerFound:
            print >>sys.stderr, ("AWS credentials not properly configured, "
                "please supply --key and --secret arguments.")
            sys.exit(1)
    def __str__(self):
        return self.name
class S3Queue(Thread):
"""
Take a list of `filenames` and upload them to S3 with leading key `prefix`.
If `counter` is specified, it will be called once with no arguments for
each successful upload, and once with ``False`` as its only argument for
each failed upload.
:param prefix: S3 key prefix
:param filenames: Iterable of filenames
:param bucket: A :class:`S3Bucket` instance
:param strip_path: Leading path to strip (optional)
:param counter: This is called once for each upload (optional)
:type prefix: str
:type filenames: list
:type bucket: :class:`S3Bucket`
:type strip_path: str
:type counter: callable
If `strip_path` is specified, `strip_path` will be stripped from the front
of each filename before composing the uploaded key.
If there are any exceptions raised while uploading a file, that filename
will be available in the :attr:`~S3Queue.failed` list. An empty list means
there were no exceptions raised during upload.
This runs as a single thread and one can :meth:`join` it to wait for it to
finish.
The iterable object `filenames` shouldn't be modified or referenced by
other threads, as that would not be thread-safe.
"""
def __init__(self, prefix, filenames, bucket, strip_path=None, **kwargs):
# Get the counting callback if it's set
self.counter = kwargs.pop('counter', None)
kwargs.setdefault('name', "S3Queue.{}:{}".format(bucket, id(self)))
super(S3Queue, self).__init__(**kwargs)
self.log = logging.getLogger(self.name)
self.prefix = prefix.strip('/')
self.filenames = filenames
self.failed = []
self.bucket = bucket
self.strip_path = strip_path
def run(self):
""" Run method for the threading API. """
bucket = self.bucket.get_new()
# Iterate over the filenames attempting to upload them
while self.filenames:
self._upload(self.filenames.pop(), bucket)
def _upload(self, filename, bucket):
"""
Upload `filename` to `bucket`.
:param filename: Filename to upload
:param bucket: A :class:`boto.s3.bucket.Bucket` instance
:type filename: str
:type bucket: :class:`boto.s3.bucket.Bucket`
"""
# Get a new key in this bucket, set its name and upload to it
try:
key = self._key(filename)
s3key = boto.s3.key.Key(bucket)
s3key.key = key
s3key.set_contents_from_filename(filename)
# Set the access for this key
if self.bucket.public:
s3key.set_acl('public-read')
else:
s3key.set_acl('authenticated-read')
except:
self.log.debug("Failed %r", key, exc_info=True)
self.failed.append(filename)
if self.counter:
self.counter(False)
else:
self.log.debug("Uploaded %r", key)
if self.counter:
self.counter()
def _key(self, filename):
"""
Return a S3 key from `filename`.
:param filename: A filename
:type filename: str
"""
# Remove the leading path if necessary
if self.strip_path and filename.startswith(self.strip_path):
filename = filename[len(self.strip_path):]
# Strip the filename of leading slashies
filename = filename.lstrip('/')
# Join it to the prefix and go!
return '/'.join((self.prefix, filename))
def __str__(self):
return self.name
class S3Uploader(object):
    """
    Runs a set of parallel uploads.
    :param directory: Directory to sync
    :param prefix: S3 key prefix
    :param bucket: A :class:`S3Bucket` instance
    :param exclude: List of filename regexes to exclude (optional)
    :param include: List of filename regexes to include (optional)
    :param concurrency: Number of concurrent uploads to use (default: 1)
    :param noreplace: Skip files whose keys already exist in the bucket
    :param output: File or stream to output progress to (optional)
    :type directory: str
    :type prefix: str
    :type bucket: :class:`S3Bucket`
    :type exclude: list
    :type include: list
    :type concurrency: int
    :type noreplace: bool
    :type output: file
    """
    def __init__(self, directory, prefix, bucket, include=None, exclude=None,
            concurrency=1, noreplace=False, output=None):
        self.directory = directory
        self.prefix = prefix
        self.bucket = bucket
        self.include = include
        self.exclude = exclude
        self.concurrency = concurrency
        self.output = output
        self.noreplace = noreplace
        # Progress counters, updated via counter() from the worker threads.
        self.total = 0
        self.count = 0
        self.errors = 0
        self.queues = []
    def upload(self):
        """
        Starts the uploading and returns a list of failed filenames.
        """
        self.count = 0
        self.errors = 0
        self.queues = []
        # Set up the signal catcher so Ctrl+C works
        signal.signal(signal.SIGINT, self.stop)
        # Get all the files, pre-divided into `concurrency` groups
        filenames = self.get_filenames(split=True)
        # Start a queue (daemon thread) with each group of files
        for queue in filenames:
            queue = S3Queue(self.prefix, queue, self.bucket, self.directory,
                    counter=self.counter)
            self.queues.append(queue)
            queue.daemon = True
            queue.start()
        # Wait for the queues to all finish.
        # NOTE(review): `remaining` hits zero as soon as every queue has
        # *popped* its last filename, which can be slightly before the last
        # upload completes -- the `failed` lists are read immediately after,
        # so a final-file failure could be missed. Confirm whether join()ing
        # the threads instead is safe here.
        failures = []
        while True:
            remaining = sum([len(q.filenames) for q in self.queues])
            if not remaining:
                break
            time.sleep(0.1)
        for queue in self.queues:
            failures.extend(queue.failed)
        if self.output:
            self.output.write('\n')
        return failures
    def stop(self, *args):
        """
        Stop all the running queues.
        It works by clearing all the queues list of files left to process,
        which means the current files will finish.
        """
        # Python 2 print statements (this module targets Python 2).
        print
        print >>sys.stderr, "Stopping... "
        for queue in self.queues:
            queue.filenames = []
        sys.exit(1)
    def get_filenames(self, split=False):
        """
        Return a list of filenames to upload, filtered by :attr:`include` and
        :attr:`exclude`, if set.
        If `split` is ``True``, then this method returns a list of lists, where
        filenames are evenly divided into :attr:`concurrency` groups.
        After running this method, :attr:`total` will be set to the number of
        filenames found.
        """
        filenames = []
        self.total = 0
        for path, dirs, files in os.walk(self.directory):
            for filename in files:
                filename = os.path.join(path, filename)
                # Iterate over all the include regexes, determining if we
                # should include this filename
                if self.include:
                    skip = True
                    for reg in self.include:
                        if reg.search(filename):
                            skip = False
                            break
                    if skip:
                        continue
                # Iterate over the exclude regexes, seeing if we should skip
                if self.exclude:
                    skip = False
                    for reg in self.exclude:
                        if reg.search(filename):
                            skip = True
                            break
                    if skip:
                        continue
                filenames.append(filename)
                self.total += 1
        if self.noreplace:
            # Drop files whose key already exists in the bucket.
            # NOTE(review): str.replace removes *every* occurrence of
            # "<directory>/" in the path, not just the leading one -- a file
            # whose subpath repeats the directory name would be mangled.
            s3_keys = [key.key for key in self.bucket.get_new().list()]
            stripped_filenames = [filename.replace('{}/'.format(self.directory), '') for filename in filenames]
            replaceable_filenames = list(set(stripped_filenames).difference(set(s3_keys)))
            filenames = ['{}/{}'.format(self.directory, filename) for filename in replaceable_filenames]
        if split:
            # Round-robin the filenames into `concurrency` groups (xrange:
            # Python 2).
            groups = [list() for i in xrange(self.concurrency)]
            for i in xrange(len(filenames)):
                groups[i % self.concurrency].append(filenames[i])
            filenames = groups
        return filenames
    def counter(self, error=False):
        """
        Increment :attr:`count` for each time this is called.
        If `error` is truthy then :attr:`errors` is incremented too.
        :param bool error: Indicates there was an error
        """
        # XXX: We may need a lock around this -- it is called from multiple
        # S3Queue threads concurrently.
        if error:
            self.errors += 1
        self.count += 1
        self._output()
    def _output(self):
        """
        Print the current progress as a single carriage-returned line.
        """
        if not self.output:
            # Exit out if we don't have a stream to output to
            return
        # Get the total count as a string, so we can find its length
        total = str(self.total)
        # Compose a format specifier using the length of the total
        count = "{:" + str(len(total)) + "d}"
        # Format the count nicely with our specifier, which pads with spaces
        count = count.format(self.count)
        # Compose our whole line
        line = count + "/" + total + " files uploaded"
        # Add the error count if we have one
        if self.errors:
            line += " ({} error{})".format(self.errors,
                self.errors > 1 and 's' or '')
        # Add spacing to blot out the rest of the line
        line += " " * (int(os.environ.get('COLUMNS', 80)) - len(line) - 1)
        # Write the line to the stream, using \r to start us at the beginning
        self.output.write('\r' + line)
        # Flush the stream if that's possible
        if hasattr(self.output, 'flush'):
            self.output.flush()
def sync_to_s3(directory, prefix, bucket, include=None, exclude=None,
concurrency=1, output=None):
"""
This is a convenience wrapper around :class:`S3Uploader`.
"""
uploader = S3Uploader(directory, prefix, bucket, include=include,
exclude=exclude, concurrency=concurrency, output=output)
return | |
tile_size + tile_half_size,int(starting_positions[player_index][1]) * tile_size + tile_half_size)
pygame.draw.rect(self.preview_map_image,tile_color,pygame.Rect(pos_x,pos_y,tile_size,tile_size))
pygame.draw.circle(self.preview_map_image,Renderer.COLOR_RGB_VALUES[player_index],draw_position,tile_half_size)
y = tile_size * GameMap.MAP_HEIGHT + map_info_border_size
column = 0
self.preview_map_image.blit(self.environment_images[temp_map.get_environment_name()][0],(0,y))
# draw starting item icons
starting_x = Renderer.MAP_TILE_WIDTH + 5
x = starting_x
pygame.draw.rect(self.preview_map_image,(255,255,255),pygame.Rect(x,y,Renderer.MAP_TILE_WIDTH,Renderer.MAP_TILE_HEIGHT))
starting_items = temp_map.get_starting_items()
for i in xrange(len(starting_items)):
item = starting_items[i]
if item in self.icon_images:
item_image = self.icon_images[item]
self.preview_map_image.blit(item_image,(x + 1,y + 1))
x += item_image.get_size()[0] + 1
column += 1
if column > 2:
column = 0
x = starting_x
y += 12
#----------------------------------------------------------------------------
    def __prerender_map(self, map_to_render):
        """Pre-render the static background (terrain, special objects and the
        game-number caption) for `map_to_render` into
        self.prerendered_map_background."""
        self.animation_events = []         # clear previous animation
        debug_log("prerendering map...")
        # following images are only needed here, so we dont store them to self
        image_trampoline = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"other_trampoline.png"))
        image_teleport = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"other_teleport.png"))
        image_arrow_up = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"other_arrow_up.png"))
        image_arrow_right = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"other_arrow_right.png"))
        image_arrow_down = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"other_arrow_down.png"))
        image_arrow_left = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"other_arrow_left.png"))
        image_lava = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"other_lava.png"))
        image_background = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"other_map_background.png"))
        self.prerendered_map_background.blit(image_background,(0,0))
        # Tile pass: environment texture first, then any special-object
        # overlay on top (xrange: this module targets Python 2).
        for j in xrange(GameMap.MAP_HEIGHT):
            for i in xrange(GameMap.MAP_WIDTH):
                # (the doubled "+ +" below is a harmless unary plus)
                render_position = (i * Renderer.MAP_TILE_WIDTH + Renderer.MAP_BORDER_WIDTH,j * Renderer.MAP_TILE_HEIGHT + + Renderer.MAP_BORDER_WIDTH)
                self.prerendered_map_background.blit(self.environment_images[map_to_render.get_environment_name()][0],render_position)
                tile = map_to_render.get_tile_at((i,j))
                # Map each special-object constant to its overlay image.
                helper_mapping = {
                    MapTile.SPECIAL_OBJECT_TELEPORT_A: image_teleport,
                    MapTile.SPECIAL_OBJECT_TELEPORT_B: image_teleport,
                    MapTile.SPECIAL_OBJECT_TRAMPOLINE: image_trampoline,
                    MapTile.SPECIAL_OBJECT_ARROW_UP: image_arrow_up,
                    MapTile.SPECIAL_OBJECT_ARROW_RIGHT: image_arrow_right,
                    MapTile.SPECIAL_OBJECT_ARROW_DOWN: image_arrow_down,
                    MapTile.SPECIAL_OBJECT_ARROW_LEFT: image_arrow_left,
                    MapTile.SPECIAL_OBJECT_LAVA: image_lava
                }
                if tile.special_object in helper_mapping:
                    self.prerendered_map_background.blit(helper_mapping[tile.special_object],render_position)
        # Caption: "game X of Y", centered horizontally at the bottom edge
        # (integer "/" division -- Python 2 semantics).
        game_info = map_to_render.get_game_number_info()
        game_info_text = self.render_text(self.font_small,"game " + str(game_info[0]) + " of " + str(game_info[1]),(255,255,255))
        self.prerendered_map_background.blit(game_info_text,((self.prerendered_map_background.get_size()[0] - game_info_text.get_size()[0]) / 2,self.prerendered_map_background.get_size()[1] - game_info_text.get_size()[1]))
        # Remember which map the cached background belongs to.
        self.prerendered_map = map_to_render
#----------------------------------------------------------------------------
##< Gets an info about how given player whould be rendered in format (image to render, sprite center, relative pixel offset, draw_shadow, overlay images).
def __get_player_render_info(self, player, game_map):
    """Return (image, sprite center, relative pixel offset, draw_shadow,
    overlay images) describing how *player* should be drawn this frame.

    A dead player yields (None, (0,0), (0,0), False, []) so the caller can
    skip drawing entirely.
    """
    profiler.measure_start("map rend. player")
    draw_shadow = True          # suppressed while the player is airborne
    relative_offset = [0,0]     # pixel offset added to the sprite position
    overlay_images = []         # extra images blitted on top of the sprite
    if player.is_dead():
        profiler.measure_stop("map rend. player")
        return (None, (0,0), (0,0), False, [])
    sprite_center = Renderer.PLAYER_SPRITE_CENTER
    # 4-phase animation frame; relies on Python 2 integer division
    # (one frame per 100 ms of state time)
    animation_frame = (player.get_state_time() / 100) % 4
    # before the game starts players are colored per player number,
    # afterwards per team number
    color_index = player.get_number() if game_map.get_state() == GameMap.STATE_WAITING_TO_PLAY else player.get_team_number()
    if player.is_in_air():
        # quotient ramps 0 -> 1 -> 0 over the jump; used for scale and height
        if player.get_state_time() < Player.JUMP_DURATION / 2:
            quotient = abs(player.get_state_time() / float(Player.JUMP_DURATION / 2))
        else:
            quotient = 2.0 - abs(player.get_state_time() / float(Player.JUMP_DURATION / 2))
        scale = (1 + 0.5 * quotient)
        player_image = self.player_images[color_index]["down"]
        image_to_render = pygame.transform.scale(player_image,(int(scale * player_image.get_size()[0]),int(scale * player_image.get_size()[1])))
        draw_shadow = False
        relative_offset[0] = -1 * (image_to_render.get_size()[0] / 2 - Renderer.PLAYER_SPRITE_CENTER[0]) # offset caused by scale
        relative_offset[1] = -1 * int(math.sin(quotient * math.pi / 2.0) * Renderer.MAP_TILE_HEIGHT * GameMap.MAP_HEIGHT) # height offset
    elif player.is_teleporting():
        # rapidly cycle through all four facings as a "teleport" effect
        image_to_render = self.player_images[color_index][("up","right","down","left")[animation_frame]]
    elif player.is_boxing() or player.is_throwing():
        # frame 0 of the boxing animation reuses the plain facing sprite
        if not player.is_throwing() and animation_frame == 0:
            helper_string = ""
        else:
            helper_string = "box "
        helper_string += ("up","right","down","left")[player.get_direction_number()]
        image_to_render = self.player_images[color_index][helper_string]
    else:
        helper_string = ("up","right","down","left")[player.get_direction_number()]
        if player.is_walking():
            image_to_render = self.player_images[color_index]["walk " + helper_string][animation_frame]
        else:
            image_to_render = self.player_images[color_index][helper_string]
    if player.get_disease() != Player.DISEASE_NONE:
        # blinking 2-phase disease overlay
        overlay_images.append(self.other_images["disease"][animation_frame % 2])
    profiler.measure_stop("map rend. player")
    return (image_to_render,sprite_center,relative_offset,draw_shadow,overlay_images)
#----------------------------------------------------------------------------
##< Same as __get_player_render_info, but for bombs.
def __get_bomb_render_info(self, bomb, game_map):
    """Return (image, sprite center, relative pixel offset, draw_shadow,
    overlay images) for *bomb*; a bomb's shadow is always drawn.
    """
    profiler.measure_start("map rend. bomb")
    sprite_center = Renderer.BOMB_SPRITE_CENTER
    # 4-phase pulsing animation (Py2 integer division, one frame per 100 ms)
    animation_frame = (bomb.time_of_existence / 100) % 4
    relative_offset = [0,0]
    overlay_images = []
    if bomb.has_detonator():
        overlay_images.append(self.other_images["antena"])
        if bomb.time_of_existence < Bomb.DETONATOR_EXPIRATION_TIME:
            animation_frame = 0 # bomb won't pulse if within detonator expiration time
    if bomb.movement == Bomb.BOMB_FLYING:
        # offset the sprite back along the flight direction, plus a
        # sine-shaped vertical offset to draw the flight arc
        normalised_distance_travelled = bomb.flight_info.distance_travelled / float(bomb.flight_info.total_distance_to_travel)
        helper_offset = -1 * bomb.flight_info.total_distance_to_travel + bomb.flight_info.distance_travelled
        relative_offset = [
            int(bomb.flight_info.direction[0] * helper_offset * Renderer.MAP_TILE_WIDTH),
            int(bomb.flight_info.direction[1] * helper_offset * Renderer.MAP_TILE_HALF_HEIGHT)]
        relative_offset[1] -= int(math.sin(normalised_distance_travelled * math.pi) * bomb.flight_info.total_distance_to_travel * Renderer.MAP_TILE_HEIGHT / 2) # height in air
    image_to_render = self.bomb_images[animation_frame]
    if bomb.has_spring:
        overlay_images.append(self.other_images["spring"])
    profiler.measure_stop("map rend. bomb")
    return (image_to_render,sprite_center,relative_offset,True,overlay_images)
#----------------------------------------------------------------------------
def render_map(self, map_to_render):
    """Render the whole in-game view (map tiles, players, bombs, flames,
    info boards and effects) into a new Surface and return it.

    Tiles are drawn line by line, with players/bombs interleaved in
    y-order so sprites overlap correctly.
    """
    result = pygame.Surface(self.screen_resolution)
    self.menu_background_image = None # unload unneccessarry images
    self.menu_item_images = None
    self.preview_map_name = ""
    self.preview_map_image = None
    self.update_info_boards(map_to_render.get_players())
    if map_to_render != self.prerendered_map: # first time rendering this map, prerender some stuff
        self.__prerender_map(map_to_render)
    profiler.measure_start("map rend. backg.")
    result.blit(self.prerendered_map_background,self.map_render_location)
    profiler.measure_stop("map rend. backg.")
    # order the players and bombs by their y position so that they are drawn correctly
    profiler.measure_start("map rend. sort")
    ordered_objects_to_render = []
    ordered_objects_to_render.extend(map_to_render.get_players())
    ordered_objects_to_render.extend(map_to_render.get_bombs())
    ordered_objects_to_render.sort(key = lambda what: 1000 if (isinstance(what,Bomb) and what.movement == Bomb.BOMB_FLYING) else what.get_position()[1]) # flying bombs are rendered above everything else
    profiler.measure_stop("map rend. sort")
    # render the map by lines:
    tiles = map_to_render.get_tiles()
    environment_images = self.environment_images[map_to_render.get_environment_name()]
    y = Renderer.MAP_BORDER_WIDTH + self.map_render_location[1]
    # blocks/walls are taller than one tile: align their bottom edge with the tile
    y_offset_block = Renderer.MAP_TILE_HEIGHT - environment_images[1].get_size()[1]
    y_offset_wall = Renderer.MAP_TILE_HEIGHT - environment_images[2].get_size()[1]
    line_number = 0
    object_to_render_index = 0
    # 2-phase flame flicker driven by wall-clock ticks (Py2 int division)
    flame_animation_frame = (pygame.time.get_ticks() / 100) % 2
    for line in tiles:
        x = (GameMap.MAP_WIDTH - 1) * Renderer.MAP_TILE_WIDTH + Renderer.MAP_BORDER_WIDTH + self.map_render_location[0]
        while True: # render players and bombs in the current line
            if object_to_render_index >= len(ordered_objects_to_render):
                break
            object_to_render = ordered_objects_to_render[object_to_render_index]
            # objects are sorted by y; stop once they belong to a later line
            if object_to_render.get_position()[1] > line_number + 1:
                break
            if isinstance(object_to_render,Player):
                image_to_render, sprite_center, relative_offset, draw_shadow, overlay_images = self.__get_player_render_info(object_to_render, map_to_render)
            else: # bomb
                image_to_render, sprite_center, relative_offset, draw_shadow, overlay_images = self.__get_bomb_render_info(object_to_render, map_to_render)
            if image_to_render == None: # e.g. a dead player: nothing to draw
                object_to_render_index += 1
                continue
            if draw_shadow:
                render_position = self.tile_position_to_pixel_position(object_to_render.get_position(),Renderer.SHADOW_SPRITE_CENTER)
                # the x modulo wraps sprites around the map's horizontal edge
                render_position = (
                    (render_position[0] + Renderer.MAP_BORDER_WIDTH + relative_offset[0]) % self.prerendered_map_background.get_size()[0] + self.map_render_location[0],
                    render_position[1] + Renderer.MAP_BORDER_WIDTH + self.map_render_location[1])
                result.blit(self.other_images["shadow"],render_position)
            render_position = self.tile_position_to_pixel_position(object_to_render.get_position(),sprite_center)
            render_position = ((render_position[0] + Renderer.MAP_BORDER_WIDTH + relative_offset[0]) % self.prerendered_map_background.get_size()[0] + self.map_render_location[0],render_position[1] + Renderer.MAP_BORDER_WIDTH + relative_offset[1] + self.map_render_location[1])
            result.blit(image_to_render,render_position)
            for additional_image in overlay_images:
                result.blit(additional_image,render_position)
            object_to_render_index += 1
        for tile in reversed(line): # render tiles in the current line
            profiler.measure_start("map rend. tiles")
            if not tile.to_be_destroyed: # don't render a tile that is being destroyed
                if tile.kind == MapTile.TILE_BLOCK:
                    result.blit(environment_images[1],(x,y + y_offset_block))
                elif tile.kind == MapTile.TILE_WALL:
                    result.blit(environment_images[2],(x,y + y_offset_wall))
                elif tile.item != None:
                    result.blit(self.item_images[tile.item],(x,y))
            if len(tile.flames) != 0: # if there is at least one flame, draw it
                sprite_name = tile.flames[0].direction
                result.blit(self.flame_images[flame_animation_frame][sprite_name],(x,y))
            # for debug: uncomment this to see danger values on the map
            # pygame.draw.rect(result,(int((1 - map_to_render.get_danger_value(tile.coordinates) / float(GameMap.SAFE_DANGER_VALUE)) * 255.0),0,0),pygame.Rect(x + 10,y + 10,30,30))
            x -= Renderer.MAP_TILE_WIDTH
            profiler.measure_stop("map rend. tiles")
        x = (GameMap.MAP_WIDTH - 1) * Renderer.MAP_TILE_WIDTH + Renderer.MAP_BORDER_WIDTH + self.map_render_location[0]
        y += Renderer.MAP_TILE_HEIGHT
        line_number += 1
    # update animations
    profiler.measure_start("map rend. anim")
    for animation_index in self.animations:
        self.animations[animation_index].draw(result)
    profiler.measure_stop("map rend. anim")
    # draw info boards
    profiler.measure_start("map rend. boards")
    players_by_numbers = map_to_render.get_players_by_numbers()
    x = self.map_render_location[0] + 12
    y = self.map_render_location[1] + self.prerendered_map_background.get_size()[1] + 20
    for i in players_by_numbers:
        if players_by_numbers[i] == None or self.player_info_board_images[i] == None:
            continue
        if players_by_numbers[i].is_dead():
            movement_offset = (0,0)
        else:
            # boards of living players bob slightly, phase-shifted per player
            movement_offset = (int(math.sin(pygame.time.get_ticks() / 64.0 + i) * 2),int(4 * math.sin(pygame.time.get_ticks() / 128.0 - i)))
        result.blit(self.player_info_board_images[i],(x + movement_offset[0],y + movement_offset[1]))
        x += self.gui_images["info board"].get_size()[0] - 2
    profiler.measure_stop("map rend. boards")
    profiler.measure_start("map rend. earthquake")
    if map_to_render.earthquake_is_active(): # shaking effect
        # NOTE(review): random_scale is computed but never used — looks like a
        # leftover of an intended scale effect; only the rotation is applied.
        random_scale = random.uniform(0.99,1.01)
        result = pygame.transform.rotate(result,random.uniform(-4,4))
    profiler.measure_stop("map rend. earthquake")
    if map_to_render.get_state() == GameMap.STATE_WAITING_TO_PLAY:
        # pre-game countdown: shows 3, 2, 1 (Py2 integer division)
        third = GameMap.START_GAME_AFTER / 3
        countdown_image_index = max(3 - map_to_render.get_map_time() / third,1)
        countdown_image = self.gui_images["countdown"][countdown_image_index]
        countdown_position = (self.screen_center[0] - countdown_image.get_size()[0] / 2,self.screen_center[1] - countdown_image.get_size()[1] / 2)
        result.blit(countdown_image,countdown_position)
    return result
#==============================================================================
class AI(object):
REPEAT_ACTIONS = (100,300) ##< In order not to compute actions with every single call to
# play(), actions will be stored in self.outputs and repeated
# for next random(REPEAT_ACTIONS[0],REPEAT_ACTIONS[1]) ms - saves
# CPU time and prevents jerky AI movement.
#----------------------------------------------------------------------------
def __init__(self, player, game_map):
    """Create an AI controller for *player* acting on *game_map*."""
    self.player = player                    ##< player controlled by this AI
    self.game_map = game_map                ##< map the controlled player moves on
    self.outputs = []                       ##< holds currently active outputs
    self.recompute_compute_actions_on = 0   ##< time at which to recompute actions (see REPEAT_ACTIONS)
    self.do_nothing = False                 ##< this can turn AI off for debugging purposes
    self.didnt_move_since = 0               ##< presumably a timestamp of the last observed movement — TODO confirm against usage
#----------------------------------------------------------------------------
def tile_is_escapable(self, tile_coordinates):
    """Return True if the given tile is a usable escape square: walkable,
    not on fire and not lava."""
    game_map = self.game_map
    if not game_map.tile_is_walkable(tile_coordinates):
        return False
    if game_map.tile_has_flame(tile_coordinates):
        return False
    return game_map.get_tile_at(tile_coordinates).special_object != MapTile.SPECIAL_OBJECT_LAVA
#----------------------------------------------------------------------------
## Returns a two-number tuple of x, y coordinates, where x and y are
# either -1, 0 or 1, indicating a rough general direction in which to
# move in order to prevent AI from walking in nonsensical direction (towards
# outside of the map etc.).
def decide_general_direction(self):
players = self.game_map.get_players()
enemy_players = filter(lambda p: p.is_enemy(self.player) and not p.is_dead(), players)
enemy_player = enemy_players[0] if len(enemy_players) > 0 else self.player
my_tile_position = self.player.get_tile_position()
another_player_tile_position = enemy_player.get_tile_position()
dx = another_player_tile_position[0] - my_tile_position[0]
dy = another_player_tile_position[1] - | |
order = None ):
"""
https://developer.nulab-inc.com/docs/backlog/api/2/get-comment-list
"""
params = { "apiKey": self.apikey }
_addkw( params, "minId", minId )
_addkw( params, "maxId", maxId )
_addkw( params, "count", count )
_addkw( params, "order", order )
url = self._makeurl( "/api/v2/issues/" + str( issueIdOrKey ) + \
"/comments" )
return self._api_return(
requests.get( url, params = params ) )
def addComment(self, issueIdOrKey, content,
               notifiedUserIds=None,
               attachmentIds=None):
    """Add a comment to an issue.

    https://developer.nulab-inc.com/docs/backlog/api/2/add-comment

    ``content`` is mandatory and is set directly in the form data; the
    optional notification / attachment id lists are only sent when given.

    FIX: the original both seeded ``data`` with "content" and then added it
    again via ``_addkw(data, "content", content)`` — the redundant call is
    removed.
    """
    params = {"apiKey": self.apikey}
    data = {"content": content}
    _addkws(data, "notifiedUserId", notifiedUserIds)
    _addkws(data, "attachmentId", attachmentIds)
    url = self._makeurl("/api/v2/issues/" + str(issueIdOrKey) +
                        "/comments")
    return self._api_return(
        requests.post(url, params=params, data=data))
def countComment(self, issueIdOrKey):
    """Return the number of comments on an issue.

    https://developer.nulab-inc.com/docs/backlog/api/2/count-comment
    """
    endpoint = "/api/v2/issues/{0}/comments/count".format(issueIdOrKey)
    return self._api_return(
        requests.get(self._makeurl(endpoint), params={"apiKey": self.apikey}))
def getComment(self, issueIdOrKey, commentId):
    """Fetch a single comment of an issue.

    https://developer.nulab-inc.com/docs/backlog/api/2/get-comment
    """
    endpoint = "/api/v2/issues/{0}/comments/{1}".format(issueIdOrKey, commentId)
    return self._api_return(
        requests.get(self._makeurl(endpoint), params={"apiKey": self.apikey}))
def updateComment(self, issueIdOrKey, commentId, content):
    """Replace the content of an existing comment.

    https://developer.nulab-inc.com/docs/backlog/api/2/update-comment
    """
    endpoint = "/api/v2/issues/{0}/comments/{1}".format(issueIdOrKey, commentId)
    return self._api_return(
        requests.patch(self._makeurl(endpoint),
                       params={"apiKey": self.apikey},
                       data={"content": content}))
def getListPfCommentNotifications(self, issueIdOrKey, commentId):
    """List the notifications attached to a comment.

    NOTE: the method name carries a historical typo ("Pf" instead of "Of");
    it is kept unchanged for backward compatibility with existing callers.

    https://developer.nulab-inc.com/docs/backlog/api/2/get-list-of-comment-notifications
    """
    endpoint = "/api/v2/issues/{0}/comments/{1}/notifications".format(issueIdOrKey, commentId)
    return self._api_return(
        requests.get(self._makeurl(endpoint), params={"apiKey": self.apikey}))
def addCommentNotification(self, issueIdOrKey, commentId, notifiedUserIds):
    """Notify additional users about an existing comment.

    https://developer.nulab-inc.com/docs/backlog/api/2/add-comment-notification
    """
    data = {}
    _addkws(data, "notifiedUserId", notifiedUserIds)
    endpoint = "/api/v2/issues/{0}/comments/{1}/notifications".format(issueIdOrKey, commentId)
    return self._api_return(
        requests.post(self._makeurl(endpoint),
                      params={"apiKey": self.apikey}, data=data))
def getListOfIssueAttachments(self, issueIdOrKey):
    """List the files attached to an issue.

    https://developer.nulab-inc.com/docs/backlog/api/2/get-list-of-issue-attachments
    """
    endpoint = "/api/v2/issues/{0}/attachments".format(issueIdOrKey)
    return self._api_return(
        requests.get(self._makeurl(endpoint), params={"apiKey": self.apikey}))
def getIssueAttachment(self, issueIdOrKey, attachmentId,
                       output="path",
                       dirpath="."):
    """Download an issue attachment; the response is streamed so large
    files are not held in memory.

    https://developer.nulab-inc.com/docs/backlog/api/2/get-issue-attachment
    """
    endpoint = "/api/v2/issues/{0}/attachments/{1}".format(issueIdOrKey, attachmentId)
    response = requests.get(self._makeurl(endpoint),
                            params={"apiKey": self.apikey},
                            stream=True)
    return self._api_return(response, output=output, dirpath=dirpath)
def deleteIssueAttachment(self, issueIdOrKey, attachmentId):
    """Delete an attachment from an issue.

    https://developer.nulab-inc.com/docs/backlog/api/2/delete-issue-attachment
    """
    endpoint = "/api/v2/issues/{0}/attachments/{1}".format(issueIdOrKey, attachmentId)
    return self._api_return(
        requests.delete(self._makeurl(endpoint), params={"apiKey": self.apikey}))
def getListOfLinkedSharedFiles(self, issueIdOrKey):
    """List the shared files linked to an issue.

    https://developer.nulab-inc.com/docs/backlog/api/2/get-list-of-linked-shared-files
    """
    endpoint = "/api/v2/issues/{0}/sharedFiles".format(issueIdOrKey)
    return self._api_return(
        requests.get(self._makeurl(endpoint), params={"apiKey": self.apikey}))
def removeLinkToSharedFileFromIssue(self, issueIdOrKey, fileId):
    """Remove the link to a shared file from an issue.

    https://developer.nulab-inc.com/docs/backlog/api/2/remove-link-to-shared-file-from-issue

    FIX: the file id belongs in the URL only; the API takes no request
    body. The original additionally sent ``{"fileId": ...}`` as form data —
    that redundant payload is dropped, matching the sibling
    ``removeLinkToSharedFileFromWiki``.
    """
    params = {"apiKey": self.apikey}
    url = self._makeurl("/api/v2/issues/" + str(issueIdOrKey) +
                        "/sharedFiles/" + str(fileId))
    return self._api_return(
        requests.delete(url, params=params))
def getWikiPageList(self, projectIdOrKey):
    """List the wiki pages of a project.

    https://developer.nulab-inc.com/docs/backlog/api/2/get-wiki-page-list
    """
    query = {"apiKey": self.apikey, "projectIdOrKey": projectIdOrKey}
    return self._api_return(
        requests.get(self._makeurl("/api/v2/wikis"), params=query))
def countWikiPage(self, projectIdOrKey):
    """Return the number of wiki pages in a project.

    https://developer.nulab-inc.com/docs/backlog/api/2/count-wiki-page
    """
    query = {"apiKey": self.apikey, "projectIdOrKey": projectIdOrKey}
    return self._api_return(
        requests.get(self._makeurl("/api/v2/wikis/count"), params=query))
def getWikiPageTagList(self, projectIdOrKey):
    """List the tags used on a project's wiki pages.

    https://developer.nulab-inc.com/docs/backlog/api/2/get-wiki-page-tag-list
    """
    query = {"apiKey": self.apikey, "projectIdOrKey": projectIdOrKey}
    return self._api_return(
        requests.get(self._makeurl("/api/v2/wikis/tags"), params=query))
def addWikiPage(self, projectId, name, content,
                mailNotify=None):
    """Create a new wiki page; ``mailNotify`` is only sent when provided.

    https://developer.nulab-inc.com/docs/backlog/api/2/add-wiki-page
    """
    data = {"projectId": projectId, "name": name, "content": content}
    _addkw(data, "mailNotify", mailNotify)
    return self._api_return(
        requests.post(self._makeurl("/api/v2/wikis"),
                      params={"apiKey": self.apikey}, data=data))
def getWikiPage(self, wikiId):
    """Fetch a wiki page by id.

    https://developer.nulab-inc.com/docs/backlog/api/2/get-wiki-page
    """
    return self._api_return(
        requests.get(self._makeurl("/api/v2/wikis/{0}".format(wikiId)),
                     params={"apiKey": self.apikey}))
def updateWikiPage(self, wikiId,
                   name=None,
                   content=None,
                   mailNotify=None):
    """Update a wiki page; only the fields actually provided are sent.

    https://developer.nulab-inc.com/docs/backlog/api/2/update-wiki-page
    """
    data = {}
    _addkw(data, "name", name)
    _addkw(data, "content", content)
    _addkw(data, "mailNotify", mailNotify)
    return self._api_return(
        requests.patch(self._makeurl("/api/v2/wikis/{0}".format(wikiId)),
                       params={"apiKey": self.apikey}, data=data))
def deleteWikiPage(self, wikiId,
                   mailNotify=None):
    """Delete a wiki page; ``mailNotify`` is only sent when provided.

    https://developer.nulab-inc.com/docs/backlog/api/2/delete-wiki-page
    """
    data = {}
    _addkw(data, "mailNotify", mailNotify)
    return self._api_return(
        requests.delete(self._makeurl("/api/v2/wikis/{0}".format(wikiId)),
                        params={"apiKey": self.apikey}, data=data))
def getListOfWikiAttachments(self, wikiId):
    """List the files attached to a wiki page.

    https://developer.nulab-inc.com/docs/backlog/api/2/get-list-of-wiki-attachments
    """
    endpoint = "/api/v2/wikis/{0}/attachments".format(wikiId)
    return self._api_return(
        requests.get(self._makeurl(endpoint), params={"apiKey": self.apikey}))
def attachFileToWiki(self, wikiId, attachmentIds):
    """Attach previously uploaded files to a wiki page.

    https://developer.nulab-inc.com/docs/backlog/api/2/attach-file-to-wiki
    """
    data = {}
    _addkws(data, "attachmentId", attachmentIds)
    endpoint = "/api/v2/wikis/{0}/attachments".format(wikiId)
    return self._api_return(
        requests.post(self._makeurl(endpoint),
                      params={"apiKey": self.apikey}, data=data))
def getWikiPageAttachment(self, wikiId, attachmentId,
                          output="path",
                          dirpath="."):
    """Download a wiki page attachment; the response is streamed.

    https://developer.nulab-inc.com/docs/backlog/api/2/get-wiki-page-attachment
    """
    endpoint = "/api/v2/wikis/{0}/attachments/{1}".format(wikiId, attachmentId)
    response = requests.get(self._makeurl(endpoint),
                            params={"apiKey": self.apikey},
                            stream=True)
    return self._api_return(response, output=output, dirpath=dirpath)
def removeWikiAttachment(self, wikiId, attachmentId):
    """Delete an attachment from a wiki page.

    https://developer.nulab-inc.com/docs/backlog/api/2/remove-wiki-attachment
    """
    endpoint = "/api/v2/wikis/{0}/attachments/{1}".format(wikiId, attachmentId)
    return self._api_return(
        requests.delete(self._makeurl(endpoint), params={"apiKey": self.apikey}))
def getListOfSharedFilesOnWiki(self, wikiId):
    """List the shared files linked to a wiki page.

    https://developer.nulab-inc.com/docs/backlog/api/2/get-list-of-shared-files-on-wiki
    """
    endpoint = "/api/v2/wikis/{0}/sharedFiles".format(wikiId)
    return self._api_return(
        requests.get(self._makeurl(endpoint), params={"apiKey": self.apikey}))
def linkSharedFilesToWiki(self, wikiId, fileIds):
    """Link shared files to a wiki page.

    https://developer.nulab-inc.com/docs/backlog/api/2/link-shared-files-to-wiki
    """
    data = {}
    _addkws(data, "fileId", fileIds)
    endpoint = "/api/v2/wikis/{0}/sharedFiles".format(wikiId)
    return self._api_return(
        requests.post(self._makeurl(endpoint),
                      params={"apiKey": self.apikey}, data=data))
def removeLinkToSharedFileFromWiki(self, wikiId, fileId):
    """Remove a shared-file link from a wiki page.

    https://developer.nulab-inc.com/docs/backlog/api/2/remove-link-to-shared-file-from-wiki
    """
    endpoint = "/api/v2/wikis/{0}/sharedFiles/{1}".format(wikiId, fileId)
    return self._api_return(
        requests.delete(self._makeurl(endpoint), params={"apiKey": self.apikey}))
def getWikiPageHistory(self, wikiId,
                       minId=None,
                       maxId=None,
                       count=None,
                       order=None):
    """Fetch the update history of a wiki page; paging parameters are
    only sent when provided.

    https://developer.nulab-inc.com/docs/backlog/api/2/get-wiki-page-history
    """
    params = {"apiKey": self.apikey}
    _addkw(params, "minId", minId)
    _addkw(params, "maxId", maxId)
    _addkw(params, "count", count)
    _addkw(params, "order", order)
    return self._api_return(
        requests.get(self._makeurl("/api/v2/wikis/{0}/history".format(wikiId)),
                     params=params))
def getWikiPageStar(self, wikiId):
    """List the stars given to a wiki page.

    https://developer.nulab-inc.com/docs/backlog/api/2/get-wiki-page-star
    """
    return self._api_return(
        requests.get(self._makeurl("/api/v2/wikis/{0}/stars".format(wikiId)),
                     params={"apiKey": self.apikey}))
def addStar( self,
issueId = None,
commentId = None,
wikiId = None,
pullRequestsId = None,
pullRequestCommentId = None ):
"""
https://developer.nulab-inc.com/docs/backlog/api/2/add-star
"""
params = { "apiKey": self.apikey }
data = {}
_addkw( data, "issueId", issueId )
_addkw( data, "commentId", commentId )
_addkw( | |
"""
This script prepares data in the format for the testing
algorithms to run
The script is expanded to the
"""
from __future__ import division
from queue import PriorityQueue
from datetime import datetime
import shared_variables
from shared_variables import get_unicode_from_int
import copy
import csv
import re
import time
import numpy as np
def prepare_testing_data(eventlog):
    """Read ``<data_folder>/<eventlog>.csv`` (columns: case id, activity id,
    timestamp "%Y-%m-%d %H:%M:%S", group/resource id) and build the test
    fold (last third of the cases) plus the encoding metadata derived from
    the first two (training) folds.

    Returns a large tuple: the fold-3 trace strings and ids, group/time
    strings, four time sequences, maxlen, the character vocabularies and
    index maps, the time divisors, predict_size and the target index maps.
    """
    csvfile = open(shared_variables.data_folder + '%s.csv' % eventlog, 'r')
    spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
    next(spamreader, None)  # skip the headers
    lastcase = ''
    line = ''          # activity sequence of the current case, one unicode char per event
    line_group = ''    # group/resource sequence of the current case
    line_time = ''     # discretized inter-event-time sequence of the current case
    first_line = True
    lines_id = []
    lines = []
    lines_group = []
    lines_time = []
    timeseqs = []  # relative time since previous event
    timeseqs2 = []  # relative time since case start
    timeseqs3 = []  # absolute time of previous event
    timeseqs4 = []  # absolute time of event as a string
    times = []
    times2 = []
    times3 = []
    times4 = []
    difflist = []
    numlines = 0
    casestarttime = None
    lasteventtime = None
    r = 3  # number of buckets used to discretize inter-event times
    # --- pass 1: collect the inter-event time difference (seconds) of every row
    for row in spamreader:
        t1 = time.strptime(row[2], "%Y-%m-%d %H:%M:%S")
        if row[0] != lastcase:
            lastevent = t1
            lastcase = row[0]
        # NOTE(review): row[1] == '0' appears to mark events whose time
        # difference should be forced to zero — confirm the column semantics
        if row[1] != '0':
            t2 = datetime.fromtimestamp(time.mktime(t1)) - datetime.fromtimestamp(time.mktime(lastevent))
            tdiff = 86400 * t2.days + t2.seconds
        else:
            tdiff = 0
        difflist.append(tdiff)
        lastevent = t1
    difflist = [int(i) for i in difflist]
    maxdiff = max(difflist)
    # bucket width (true division via __future__ import);
    # NOTE(review): diff == 0 when all differences are zero, which would
    # raise ZeroDivisionError in the second pass — TODO confirm inputs
    diff = maxdiff / r
    # mediandiff = np.percentile(difflist, 50)
    # diff = mediandiff / r
    csvfile.seek(0)
    next(spamreader, None)  # skip the headers
    row_index = 0
    # --- pass 2: build per-case activity/group/time strings and time sequences
    for row in spamreader:
        t = time.strptime(row[2], "%Y-%m-%d %H:%M:%S")
        if row[0] != lastcase:
            casestarttime = t
            lasteventtime = t
            lastcase = row[0]
            if not first_line:
                lines.append(line)
                lines_group.append(line_group)
                lines_time.append(line_time)
                timeseqs.append(times)
                timeseqs2.append(times2)
                timeseqs3.append(times3)
                timeseqs4.append(times4)
                # NOTE(review): lastcase was already updated above, so the id
                # stored here is the id of the NEXT case, not of the trace
                # being appended — looks off by one; verify against callers
                lines_id.append(lastcase)
            line = ''
            line_group = ''
            line_time = ''
            times = []
            times2 = []
            times3 = []
            times4 = []
            numlines += 1
        line += get_unicode_from_int(row[1])
        line_group += get_unicode_from_int(row[3])
        line_time += get_unicode_from_int(int(difflist[row_index] / diff))
        if hasattr(time, 'tzset'):
            # Move the time zone info into os.environ. See ticket #2315 for why
            # we don't do this unconditionally (breaks Windows).
            time.tzset()
        timesincelastevent = datetime.fromtimestamp(time.mktime(t)) - datetime.fromtimestamp(time.mktime(lasteventtime))
        timesincecasestart = datetime.fromtimestamp(time.mktime(t)) - datetime.fromtimestamp(time.mktime(casestarttime))
        timediff = 86400 * timesincelastevent.days + timesincelastevent.seconds
        timediff2 = 86400 * timesincecasestart.days + timesincecasestart.seconds
        times.append(timediff)
        times2.append(timediff2)
        times3.append(datetime.fromtimestamp(time.mktime(t)))
        times4.append(row[2])
        lasteventtime = t
        first_line = False
        row_index += 1
    # add last case
    lines.append(line)
    lines_group.append(line_group)
    lines_time.append(line_time)
    timeseqs.append(times)
    timeseqs2.append(times2)
    timeseqs3.append(times3)
    timeseqs4.append(times4)
    numlines += 1
    # average time between events / since case start, used as divisors
    divisor = np.mean([item for sublist in timeseqs for item in sublist])
    divisor2 = np.mean([item for sublist in timeseqs2 for item in sublist])
    #divisor3 = np.mean(map(lambda x: np.mean(map(lambda y: x[len(x) - 1] - y, x)), timeseqs2))
    divisor3 = np.mean([np.mean([x[len(x) - 1] - y for y in x]) for x in timeseqs2])
    # vocabularies are derived from the first two folds (the training data)
    elems_per_fold = int(round(numlines / 3))
    fold1and2lines = lines[:2 * elems_per_fold]
    #fold1and2lines = map(lambda x: x + '!', fold1and2lines)
    #maxlen = max(map(lambda x: len(x), fold1and2lines))
    fold1and2lines = [x + '!' for x in fold1and2lines]  # '!' = end-of-trace symbol
    maxlen = max([len(x) for x in fold1and2lines])
    chars = list(map(lambda x: set(x), fold1and2lines))
    chars = list(set().union(*chars))
    chars.sort()
    target_chars = copy.copy(chars)
    # '!' is a valid prediction target but never a valid input character
    if '!' in chars:
        chars.remove('!')
    char_indices = dict((c, i) for i, c in enumerate(chars))
    target_char_indices = dict((c, i) for i, c in enumerate(target_chars))
    target_indices_char = dict((i, c) for i, c in enumerate(target_chars))
    fold1and2lines_group = lines_group[:2 * elems_per_fold]
    # fold1and2lines_group = map(lambda x: x + '!', fold1and2lines_group)
    chars_group = list(map(lambda x: set(x), fold1and2lines_group))
    chars_group = list(set().union(*chars_group))
    chars_group.sort()
    target_chars_group = copy.copy(chars_group)
    # chars_group.remove('!')
    char_indices_group = dict((c, i) for i, c in enumerate(chars_group))
    target_char_indices_group = dict((c, i) for i, c in enumerate(target_chars_group))
    target_indices_char_group = dict((i, c) for i, c in enumerate(target_chars_group))
    fold1and2lines_time = lines_time[:2 * elems_per_fold]
    # fold1and2lines_time = map(lambda x: x + '!', fold1and2lines_time)
    chars_time = list(map(lambda x: set(x), fold1and2lines_time))
    chars_time = list(set().union(*chars_time))
    chars_time.sort()
    target_chars_time = copy.copy(chars_time)
    # chars_time.remove('!')
    char_indices_time = dict((c, i) for i, c in enumerate(chars_time))
    target_char_indices_time = dict((c, i) for i, c in enumerate(target_chars_time))
    target_indices_char_time = dict((i, c) for i, c in enumerate(target_chars_time))
    # we only need the third fold, because first two were used for training
    fold3 = lines[2 * elems_per_fold:]
    fold3_id = lines_id[2 * elems_per_fold:]
    fold3_group = lines_group[2 * elems_per_fold:]
    fold3_time = lines_time[2 * elems_per_fold:]
    fold3_t = timeseqs[2 * elems_per_fold:]
    fold3_t2 = timeseqs2[2 * elems_per_fold:]
    fold3_t3 = timeseqs3[2 * elems_per_fold:]
    fold3_t4 = timeseqs4[2 * elems_per_fold:]
    lines = fold3
    lines_id = fold3_id
    lines_group = fold3_group
    lines_time = fold3_time
    lines_t = fold3_t
    lines_t2 = fold3_t2
    lines_t3 = fold3_t3
    lines_t4 = fold3_t4
    # set parameters
    predict_size = maxlen
    return lines, \
           lines_id, \
           lines_group, \
           lines_time, \
           lines_t, \
           lines_t2, \
           lines_t3, \
           lines_t4, \
           maxlen, \
           chars, \
           chars_group, \
           chars_time, \
           char_indices, \
           char_indices_group, \
           char_indices_time, \
           divisor, \
           divisor2, \
           divisor3, \
           predict_size, \
           target_indices_char, \
           target_indices_char_group, \
           target_indices_char_time, \
           target_char_indices, \
           target_char_indices_group, \
           target_char_indices_time
# selects traces verified by a declare model
def select_declare_verified_traces(server_replayer, path_to_declare_model_file, lines, lines_id, lines_group, lines_time, lines_t, lines_t2,
                                   lines_t3, lines_t4, prefix=0):
    """Filter the eight parallel trace sequences, keeping only the traces
    the declare model (checked via *server_replayer*) verifies."""
    selected = tuple([] for _ in range(8))
    for trace in zip(lines, lines_id, lines_group, lines_time, lines_t, lines_t2, lines_t3, lines_t4):
        line, line_id, line_group, line_time = trace[0], trace[1], trace[2], trace[3]
        if server_replayer.verify_with_elapsed_time(path_to_declare_model_file, line_id, line,
                                                    line_group, line_time, trace[7], prefix):
            # trace passed: append every component to its parallel output list
            for bucket, value in zip(selected, trace):
                bucket.append(value)
    return selected
# selects traces verified by LTL formula
def select_formula_verified_traces(server_replayer, lines, lines_id, lines_group, lines_time, lines_t, lines_t2, lines_t3,
                                   lines_t4, formula, prefix=0):
    """Filter the eight parallel trace sequences, keeping only the traces
    that are compliant with the given LTL *formula*."""
    selected = tuple([] for _ in range(8))
    for trace in zip(lines, lines_id, lines_group, lines_time, lines_t, lines_t2, lines_t3, lines_t4):
        if server_replayer.verify_formula_as_compliant(trace[0], formula, prefix):
            # trace passed: append every component to its parallel output list
            for bucket, value in zip(selected, trace):
                bucket.append(value)
    return selected
# define helper functions
# one-hot encodes the current prefix into the model's input tensor
def encode(sentence, sentence_group, sentence_time, times, times3, maxlen, chars, chars_group, chars_time,
           char_indices, char_indices_group, char_indices_time, divisor, divisor2):
    """Encode an activity/group/time prefix into a left-padded
    (1, maxlen, F) float32 array, where F = |chars| + |chars_group| +
    |chars_time| + 5 extra time features per step."""
    num_features = len(chars) + len(chars_group) + len(chars_time) + 5
    encoding = np.zeros((1, maxlen, num_features), dtype=np.float32)
    pad = maxlen - len(sentence)
    cumulative_times = np.cumsum(times)
    group_base = len(char_indices)
    time_base = len(char_indices) + len(char_indices_group)
    extra_base = len(chars) + len(chars_group) + len(chars_time)
    for step, event_char in enumerate(sentence):
        pos = step + pad
        midnight = times3[step].replace(hour=0, minute=0, second=0, microsecond=0)
        since_midnight = times3[step] - midnight
        # one-hot slices for activity, group and discretized-time symbols
        if event_char in chars:
            encoding[0, pos, char_indices[event_char]] = 1
        if sentence_group[step] in chars_group:
            encoding[0, pos, group_base + char_indices_group[sentence_group[step]]] = 1
        if sentence_time[step] in chars_time:
            encoding[0, pos, time_base + char_indices_time[sentence_time[step]]] = 1
        # scalar time features: position, normalized deltas, time of day, weekday
        encoding[0, pos, extra_base] = step + 1
        encoding[0, pos, extra_base + 1] = times[step] / divisor
        encoding[0, pos, extra_base + 2] = cumulative_times[step] / divisor2
        encoding[0, pos, extra_base + 3] = since_midnight.seconds / 86400
        encoding[0, pos, extra_base + 4] = times3[step].weekday() / 7
    return encoding
# modify to be able to get second best prediction
# def getSymbol(predictions, target_indices_char, ith_best=0):
# i = np.argsort(predictions)[len(predictions) - ith_best - 1]
# return target_indices_char[i]
# modify to be able to get second best prediction
def get_symbol_ampl(predictions, target_indices_char, target_char_indices, start_of_the_cycle_symbol,
                    stop_symbol_probability_amplifier_current, ith_best=0):
    """Pick the ith-best predicted symbol after damping the cycle-start symbol.

    The probability of ``start_of_the_cycle_symbol`` (when present in the
    target alphabet) is divided by the amplifier before ranking, so a
    looping symbol is less likely to be chosen again.
    """
    adjusted = list(predictions)
    if start_of_the_cycle_symbol in target_char_indices:
        start_idx = target_char_indices[start_of_the_cycle_symbol]
        adjusted[start_idx] /= stop_symbol_probability_amplifier_current
    # argsort is ascending, so the ith best sits ith-from-the-end.
    ranked = np.argsort(adjusted)
    return target_indices_char[ranked[-(ith_best + 1)]]
# modify to be able to get second best prediction
def adjust_probabilities(predictions, target_char_indices, start_of_the_cycle_symbol,
                         stop_symbol_probability_amplifier_current):
    """Return a copy of ``predictions`` with the cycle-start symbol damped.

    When ``start_of_the_cycle_symbol`` is in the target alphabet, its
    probability is divided by the amplifier; otherwise the copy is
    returned unchanged.
    """
    adjusted = list(predictions)
    try:
        start_idx = target_char_indices[start_of_the_cycle_symbol]
    except KeyError:
        return adjusted
    adjusted[start_idx] = adjusted[start_idx] / stop_symbol_probability_amplifier_current
    return adjusted
# find repetitions
def repetitions(s):
    """Yield ``(unit, count)`` for each tandem repeat found in ``s``.

    ``unit`` is the shortest repeating substring and ``count`` the number
    of consecutive occurrences (as a float ratio of match length to unit
    length).
    """
    for match in re.finditer(r"(.+?)\1+", s):
        unit = match.group(1)
        yield unit, len(match.group(0)) / len(unit)
def amplify(s):
    """Return ``(amplifier, symbol)`` for the last repetition in ``s``.

    If ``s`` ends with its last repeating unit, the amplifier is
    ``exp(count)`` of that repetition; otherwise 1. ``symbol`` is the
    first character of the repeating unit, or a space when ``s`` has no
    repetitions at all.
    """
    import math  # np.math was an alias of this module and was removed in NumPy 2.0
    list_of_rep = list(repetitions(s))
    if not list_of_rep:
        return 1, " "
    unit, count = list_of_rep[-1]
    if s.endswith(unit):
        # math.exp replaces the removed np.math.exp; same function.
        return math.exp(count), unit[0]
    return 1, unit[0]
def create_queue(activities, resources, times):
queue = PriorityQueue()
# resources_standardized = standardize_list(activities, resources)
for activity_index in range(len(activities)):
for resource_index in range(len(resources)):
for time_index in range(len(times)):
queue.put((-(np.log(activities[activity_index])+np.log(resources[resource_index])+np.log(times[time_index])),
[activity_index, resource_index, time_index]))
return | |
<filename>mindboggle/guts/compute.py
#!/usr/bin/env python
"""
Compute functions.
Authors:
- <NAME>, 2012-2016 (<EMAIL>) http://binarybottle.com
Copyright 2016, Mindboggle team (http://mindboggle.info), Apache v2.0 License
"""
def distcorr(X, Y):
    """
    Compute the distance correlation between two samples.

    Parameters
    ----------
    X : list or array of numbers
    Y : list or array of numbers

    Returns
    -------
    dcor : float
        distance correlation

    Examples
    --------
    >>> from mindboggle.guts.compute import distcorr
    >>> dcor = distcorr([1,2,3,4,5], [1,2,9,4,4])
    >>> round(float(dcor), 5)
    0.76268

    Copyright (2014-2015) MIT
    Written by <NAME> (Apache License v2.0) as part of the
    mapalign GitHub repository (https://github.com/satra/mapalign)
    """
    import numpy as np
    from scipy.spatial.distance import pdist, squareform

    # Promote 1-D inputs to column vectors of shape (n, 1).
    X = np.atleast_1d(X)
    Y = np.atleast_1d(Y)
    if np.prod(X.shape) == len(X):
        X = X[:, None]
    if np.prod(Y.shape) == len(Y):
        Y = Y[:, None]
    X = np.atleast_2d(X)
    Y = np.atleast_2d(Y)
    n = X.shape[0]
    if Y.shape[0] != n:
        raise ValueError('Number of samples must match')

    def _double_centered(mat):
        # Pairwise-distance matrix, double-centered (rows, columns, grand mean).
        d = squareform(pdist(mat))
        return d - d.mean(axis=0)[None, :] - d.mean(axis=1)[:, None] + d.mean()

    A = _double_centered(X)
    B = _double_centered(Y)
    denom = float(n * n)
    dcov2_xy = (A * B).sum() / denom
    dcov2_xx = (A * A).sum() / denom
    dcov2_yy = (B * B).sum() / denom
    return np.sqrt(dcov2_xy) / np.sqrt(np.sqrt(dcov2_xx) * np.sqrt(dcov2_yy))
def point_distance(point, points):
    """
    Compute the Euclidean distance from one point to a second (set) of points.

    Parameters
    ----------
    point : list of three floats
        coordinates for a single point
    points : list with one or more lists of three floats
        coordinates for a second point (or multiple points)

    Returns
    -------
    min_distance : float
        Euclidean distance between two points,
        or the minimum distance between a point and a set of points
    min_index : int
        index of closest of the points (zero if only one)

    Examples
    --------
    >>> from mindboggle.guts.compute import point_distance
    >>> point = [1,2,3]
    >>> points = [[10,2.0,3], [0,1.5,2]]
    >>> point_distance(point, points)
    (1.5, 1)

    Notes
    -----
    Future plan is to use scipy.spatial.distance.cdist to compute distances
    scipy.spatial.distance.cdist is available in scipy v0.12 or later
    """
    import numpy as np

    # If points is a single point
    if np.ndim(points) == 1:
        #return np.linalg.norm(np.array(point) - np.array(points))
        return np.sqrt((point[0] - points[0]) ** 2 + \
                       (point[1] - points[1]) ** 2 + \
                       (point[2] - points[2]) ** 2), 0

    # If points is a set of multiple points
    elif np.ndim(points) == 2:
        # np.inf replaces np.Inf, which was removed in NumPy 2.0.
        min_distance = np.inf
        min_index = 0
        point = np.array(point)
        for index, point2 in enumerate(points):
            distance = np.sqrt((point[0] - point2[0]) ** 2 + \
                               (point[1] - point2[1]) ** 2 + \
                               (point[2] - point2[2]) ** 2)
            #distance = np.linalg.norm(point - np.array(point2))
            if distance < min_distance:
                min_distance = distance
                min_index = index
        return min_distance, min_index

    # Else return None
    else:
        return None, None
def vector_distance(vector1, vector2, normalize=False):
    """
    Compute the Euclidean distance between two equal-sized vectors.

    Parameters
    ----------
    vector1 : numpy array of floats
        vector of values
    vector2 : numpy array of floats
        vector of values
    normalize : bool
        normalize each element of the vectors?

    Returns
    -------
    distance : float
        Euclidean distance between two vectors, divided by vector size
        (None when the vectors differ in size)

    Examples
    --------
    >>> import numpy as np
    >>> from mindboggle.guts.compute import vector_distance
    >>> distance = vector_distance(np.array([1.,2.,3.]), np.array([0,1,5]))
    >>> print('{0:0.5f}'.format(distance))
    0.81650
    """
    import numpy as np

    # Guard clause: sizes must agree, otherwise report and bail out.
    if np.size(vector1) != np.size(vector2):
        print("Vectors have to be of equal size to compute distance.")
        return None

    if not isinstance(vector1, np.ndarray):
        vector1 = np.asarray(vector1)
    if not isinstance(vector2, np.ndarray):
        vector2 = np.asarray(vector2)

    if normalize:
        # Element-wise relative difference; positions whose larger value
        # is not positive are left at zero.
        diffs = np.zeros(len(vector1))
        for i, (v1, v2) in enumerate(zip(vector1, vector2)):
            largest = max(v1, v2)
            if largest > 0:
                diffs[i] = (v1 - v2) / largest
    else:
        diffs = vector1 - vector2

    return np.sqrt(sum(diffs ** 2)) / np.size(vector1)
def pairwise_vector_distances(vectors, save_file=False, normalize=False):
    """
    Compare every pair of equal-sized vectors.

    Parameters
    ----------
    vectors : array of 1-D lists or arrays of integers or floats
    save_file : bool
        save file?
    normalize : bool
        normalize each element of the vectors?

    Returns
    -------
    vector_distances : numpy array of integers or floats
        distances between each pair of vectors (upper triangle filled)
    outfile : string [optional]
        output filename for pairwise_vector_distances ('' if not saved)

    Examples
    --------
    >>> import numpy as np
    >>> from mindboggle.guts.compute import pairwise_vector_distances
    >>> vectors = [[1,2,3],[0,3,5],[0,3.5,5],[1,1,1]]
    >>> vector_distances, outfile = pairwise_vector_distances(vectors,
    ...     False, False)
    >>> print(np.array_str(np.array(vector_distances),
    ...       precision=5, suppress_small=True))
    [[0.      0.8165  0.89753 0.74536]
     [0.      0.      0.16667 1.52753]
     [0.      0.      0.      1.60728]
     [0.      0.      0.      0.     ]]
    """
    import os
    import numpy as np
    from mindboggle.guts.compute import vector_distance

    if not isinstance(vectors, np.ndarray):
        vectors = np.array(vectors)

    nvectors = len(vectors)
    vector_distances = np.zeros((nvectors, nvectors))

    # Distance is symmetric, so only the upper triangle (j >= i) is filled.
    for i in range(nvectors):
        for j in range(i, nvectors):
            vector_distances[i, j] = vector_distance(1.0 * vectors[i],
                                                     1.0 * vectors[j],
                                                     normalize=normalize)

    if save_file:
        outfile = os.path.join(os.getcwd(), 'vector_distances.txt')
        np.savetxt(outfile, vector_distances,
                   fmt=nvectors * '%.4f ', delimiter='\t', newline='\n')
        if not os.path.exists(outfile):
            raise IOError(outfile + " not found")
    else:
        outfile = ''

    return vector_distances, outfile
def source_to_target_distances(sourceIDs, targetIDs, points,
                               segmentIDs=(), excludeIDs=(-1,)):
    """
    Create a Euclidean distance matrix between source and target points.

    Compute the Euclidean distance from each source point to
    its nearest target point, optionally within each segment.

    Example::

        Compute fundus-to-feature distances, the minimum distance
        from each label boundary vertex (corresponding to a fundus
        in the DKT cortical labeling protocol) to all of the
        feature vertices in the same fold.

    Parameters
    ----------
    sourceIDs : list of N integers (N is the number of vertices)
        source IDs, where any ID not in excludeIDs is a source point
    targetIDs : list of N integers (N is the number of vertices)
        target IDs, where any ID not in excludeIDs is a target point
    points : list of N lists of three floats (N is the number of vertices)
        coordinates of all vertices
    segmentIDs : sequence of N integers (N is the number of vertices)
        segment IDs, where each ID not in excludeIDs is considered a
        different segment; source/target distances are computed within
        each segment (empty sequence: all points form one segment 0).
        Defaults are immutable tuples (previously mutable lists).
    excludeIDs : sequence of integers
        IDs to exclude

    Returns
    -------
    distances : numpy array
        distance value for each vertex (default -1)
    distance_matrix : numpy array [#points by maximum segment ID + 1]
        distances organized by segments (columns)
    """
    import numpy as np
    from mindboggle.guts.compute import point_distance

    if isinstance(points, list):
        points = np.asarray(points)
    npoints = len(points)

    # Extract unique segment IDs (or use all points as a single segment):
    if np.size(segmentIDs):
        segments = [x for x in np.unique(segmentIDs) if x not in excludeIDs]
    else:
        segmentIDs = np.zeros(npoints)
        segments = [0]
    nsegments = max(segments) + 1

    # Initialize outputs (-1 marks vertices with no computed distance):
    distances = -1 * np.ones(npoints)
    distance_matrix = -1 * np.ones((npoints, nsegments))

    # For each segment:
    for segment in segments:
        # Set membership is O(1) per lookup (was an O(n) list scan).
        segment_set = set(i for i, x in enumerate(segmentIDs) if x == segment)

        # Find all source points in the segment:
        source_indices = [i for i, x in enumerate(sourceIDs)
                          if x not in excludeIDs and i in segment_set]
        # Find all target points in the segment:
        target_indices = [i for i, x in enumerate(targetIDs)
                          if x not in excludeIDs and i in segment_set]
        if source_indices and target_indices:
            # For each source point, find the closest target point:
            for isource in source_indices:
                d, _ = point_distance(points[isource],
                                      points[target_indices])
                distances[isource] = d
                distance_matrix[isource, segment] = d

    return distances, distance_matrix
def weighted_to_repeated_values(X, W=[], precision=1):
"""
Create a list of repeated values from weighted values.
This is useful for computing weighted statistics (ex: weighted median).
Adapted to allow for fractional weights from
http://stackoverflow.com/questions/966896/
code-golf-shortest-code-to-find-a-weighted-median
Parameters
----------
X : numpy array of floats or integers
values
W : numpy array of floats or integers
weights
precision : integer
number of decimal places to consider weights
Returns
-------
repeat_values : | |
<gh_stars>0
#!/usr/bin/python
'''The MIT License (MIT)
Copyright (c) 2017 <NAME>(<EMAIL>)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
__author__ = '<NAME> (<NAME>)'
__doc__ = '''
it's GUI of DasH aka Do as Human
created 2017-05-06 by <NAME>
'''
import webbrowser
from datetime import datetime
import wx.grid as gridlib
import traceback
import wx
from gui.MainFrame import MainFrame
import os
from lib.common import load_bench, caller_stack_info,info, get_next_in_ring_list,get_folder_item, info,debug, warn, error, parse_command_line, call_function_in_module
import re
import time
import threading
import ConfigParser
import sys
import inspect
import Queue
from SessionTab import SessionTab
import imp
import types
from lib.common import send_mail_smtp_without_login
from lib.common import run_script
from multiprocessing import Process
import subprocess
import shlex
#from dut import dut
DUT={}
class RedirectText(object):
    """File-like object installed over sys.stdout/sys.stderr.

    Mirrors every write to the original stdout, to a wx text control
    (highlighting substrings matching an error pattern) and, when a
    log path is given, to a ``dash.log`` file on disk.
    """
    # Class-level defaults; real values are assigned per instance in __init__.
    font_point_size = 10
    old_stdout = None
    old_stderr = None
    write_lock = None
    log_file = None
    error_pattern = None
    font_point = None
    previous_scroll_pos = 0
    previous_insert_pos = 0
    def __init__(self,aWxTextCtrl, log_path=None):
        """Remember the real stdout/stderr and the target wx text control.

        aWxTextCtrl: wx text control that receives mirrored output.
        log_path: directory for ``dash.log``; no file logging when falsy.
        """
        self.old_stderr , self.old_stdout=sys.stderr , sys.stdout
        self.out=aWxTextCtrl
        self.font_point_size = self.out.GetFont().PointSize+2
        # NOTE(review): lock is created but acquire/release below is commented out.
        self.write_lock = threading.Lock()
        # Pattern for error highlighting; write() lower()s the text before matching.
        self.error_pattern = re.compile('error|\s+err\s+|fail|wrong|errno')
        self.font_point =self.out.GetFont().PointSize
        if log_path:
            name = '{}/dash.log'.format(log_path)
            self.log_file = open(name, 'w+')
            # Expose fileno so this object can stand in for a real file.
            self.fileno = self.log_file.fileno
    def write(self,string):
        """File protocol write(): echo to real stdout, wx control and log.

        Error-matching substrings are rendered red-on-yellow; the control
        is frozen while the user has scrolled far above the tail, so new
        output does not yank the view back down.
        """
        def __write(string):
            #self.write_lock.acquire()
            try:
                self.old_stdout.write(string)
                err_pattern = self.error_pattern#re.compile('error|\s+err\s+|fail|wrong')
                # Measure how far the visible position lags the end of the buffer.
                current_scroll_pos = self.out.GetScrollPos(wx.VERTICAL)
                current_insert_pos = self.out.GetInsertionPoint()
                last_pos = self.out.GetLastPosition()
                v_scroll_range = self.out.GetScrollRange(wx.VERTICAL)
                char_height = self.out.GetCharHeight()
                w_client,h_client = self.out.GetClientSize()
                line_in_a_page= h_client/char_height*2/3
                max_gap=line_in_a_page
                c_col, c_line = self.out.PositionToXY(current_scroll_pos) #current_scroll_pos
                t_col, t_line = self.out.PositionToXY(v_scroll_range) #v_scroll_range last_pos
                x, y = c_col, c_line
                real_gap = t_line- c_line
                # Freeze repainting while the user is reading far above the tail.
                if real_gap>max_gap:#100
                    self.__freeze_main_log_window()
                    #self.previous_insert_pos = current_scroll_pos
                    #self.previous_scroll_pos = current_scroll_pos
                else:
                    self.__thaw_main_log_window()
                #tmp_msg ='\n!!!!! current {}, range {}, t_line {}, c_line {}, gap {}\n'.format(current_scroll_pos, v_scroll_range, t_line, c_line, t_line -c_line)
                #string+=tmp_msg
                #self.old_stdout.write()
                # Append text, switching style to red-on-yellow for each error match.
                if True:#err_pattern.search(string.lower()):
                    last_start = 0
                    for m in err_pattern.finditer(string.lower()):
                        self.out.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK,font =wx.Font(self.font_point, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))
                        self.out.AppendText( string[last_start:m.start()])
                        self.out.SetDefaultStyle(wx.TextAttr(wx.RED, wx.YELLOW,font =wx.Font(self.font_point+2, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))
                        self.out.AppendText( string[m.start():m.end()])
                        last_start= m.end()
                    self.out.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK,font =wx.Font(self.font_point, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))
                    self.out.AppendText( string[last_start:])
                else:
                    self.out.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK,font =wx.Font(self.font_point, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.NORMAL, faceName = 'Consolas')))
                    self.out.AppendText( string)
                # Mirror to the on-disk log as well.
                if self.log_file:
                    self.log_file.write(string)
                    self.log_file.flush()
                # Keep the caret/scrollbar either pinned where the user is
                # reading, or stuck to the tail for live-follow behaviour.
                if real_gap>max_gap:#1000
                    #time.sleep(0.01)
                    pass
                    self.out.SetInsertionPoint( self.out.GetScrollPos(wx.VERTICAL))
                    #self.out.SetScrollPos(wx.VERTICAL, self.previous_scroll_pos)
                    #self.previous_insert_pos = current_scroll_pos
                else:
                    #self.previous_scroll_pos= self.out.GetScrollRange(wx.VERTICAL)#v_scroll_range
                    #self.previous_insert_pos = last_pos+len(string)
                    self.out.SetScrollPos(wx.VERTICAL, self.out.GetScrollRange(wx.VERTICAL))
                    #self.out.SetScrollPos(wx.VERTICAL, self.previous_scroll_pos)
                    #self.out.SetInsertionPoint( self.previous_insert_pos) #self.out.ScrollToLine(c_line+line_in_a_page)
                    #pos =self.out.XYToPosition(xxx[0], xxx[1])
                    #self.out.ShowPosition(self.previous_insert_pos)
                self.__thaw_main_log_window()
            except Exception as e:
                # Report through the real stdout so a GUI/logging failure
                # cannot recurse back into this redirector.
                self.old_stdout.write('\n'+error(traceback.format_exc()))
            #self.write_lock.release()
            #time.sleep(0.1)
        __write(string)
        #threading.Thread(target=__write, args=[string]).start()
    def close(self):
        """Flush and close the on-disk log file, if any."""
        if self.log_file:
            self.log_file.flush()
            self.log_file.close()
    def flush(self):
        """File protocol flush(): flush the on-disk log (the wx control needs none)."""
        if self.log_file:
            self.log_file.flush()
    def __freeze_main_log_window(self):
        """Freeze the text control (suspend repaints) unless already frozen."""
        #return
        if self.out.IsFrozen():
            pass
        else:
            #self.output_window_last_position =self.out.GetScrollRange(wx.VERTICAL)
            self.out.Freeze()
    def __thaw_main_log_window(self):
        """Thaw the text control if it is currently frozen."""
        #self.out.SetScrollPos(wx.VERTICAL, self.previous_scroll_pos)
        if self.out.IsFrozen():
            self.out.Thaw()
        else:
            pass
class process_info(object):
    """Lightweight record describing a spawned subprocess.

    Captures the process handle, its pid and a display name;
    ``returncode`` is exposed as a live read-only property so callers
    always see the current exit status of the underlying process.
    """
    # Class-level defaults, assigned per instance in __init__.
    process = None
    pid = None
    full_name = None

    def __init__(self, name, process):
        self.process = process
        self.pid = process.pid
        self.full_name = name
        # BUG FIX: the original also did ``self.returncode = process.returncode``
        # here, which raised AttributeError on every instantiation because
        # ``returncode`` below is a data descriptor (property) with no setter.

    @property
    def returncode(self):
        # Delegate to the live process object so the value never goes stale.
        return self.process.returncode
class FileEditor(wx.Panel):
    """Edit-area tab hosting either a plain-text editor or a function grid.

    ``type == 'text'`` loads ``file_name`` into a wx.TextCtrl; any other
    type builds a 50x5 wx.grid.Grid with a 'Function Name' column plus
    argument columns. Ctrl+mouse-wheel adjusts the font size.
    """
    # Class-level defaults; real values are assigned in __init__.
    editor =None
    font_size=10
    parent=None
    type = None
    sessions_node =None
    function_node =None
    case_suite_node =None
    full_file_name = None
    file_instance = None
    name =''
    def on_close(self):
        """Persist the editor contents back to the backing file, if any.

        NOTE(review): saves via GetValue(), which exists on the text
        editor; grid tabs are presumably created without a file name --
        confirm against callers.
        """
        if self.full_file_name:
            data = self.editor.GetValue()
            with open(self.full_file_name, 'w') as f:
                f.write(data)
                f.flush()
    #done 2017-9-12: handle close tab in edit_area
    def __init__(self, parent, title='pageOne', type ='grid', file_name = None):
        """Build the tab: a text editor for type 'text', a grid otherwise.

        parent: notebook/panel that owns this tab.
        title: tab name stored in ``self.name``.
        type: 'text' for a file editor, anything else for the grid.
        file_name: path loaded into the text editor (and saved on close).
        """
        wx.Panel.__init__(self, parent)
        self.name = title
        self.parent = parent
        self.type = type
        self.full_file_name = file_name
        #self.editor = wx.TextCtrl(self, style = wx.TE_MULTILINE|wx.TE_RICH2|wx.EXPAND|wx.ALL, size=(-1,-1))
        if type in ['text']:
            # Multi-line rich text control; ENTER is not treated as "process" key.
            self.editor = wx.TextCtrl( self, -1, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.TE_AUTO_URL|wx.VSCROLL|wx.TE_RICH|wx.TE_MULTILINE&(~wx.TE_PROCESS_ENTER))
            #wx.richtext.RichTextCtrl( self, -1, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0|wx.VSCROLL|wx.HSCROLL|wx.NO_BORDER|wx.WANTS_CHARS )
            # Load the backing file into the editor.
            with open(self.full_file_name, 'r') as f:
                for line in f.readlines():
                    self.editor.AppendText(line)
        else:
            # Grid mode: column 0 holds function names, remaining columns hold args.
            self.editor= gridlib.Grid(self)
            self.editor.CreateGrid(50, 5)
            col = self.editor.GetNumberCols()
            row = self.editor.GetNumberRows()
            function_color ='black'
            arg_color = 'blue'
            for c in range(0, col):
                if c < 1 :
                    self.editor.SetColLabelValue(c, 'Function Name')
                else:
                    self.editor.SetColLabelValue(c, 'arg# {}'.format(c))
                for r in range (0, row):
                    self.editor.SetCellTextColour(r,c,function_color if c <1 else arg_color)
            # Bold font for the function-name column.
            for r in range (0, row):
                self.editor.SetCellFont(r, 0, wx.Font(self.font_size,wx.SWISS, wx.NORMAL, wx.BOLD ))
            # NOTE(review): the zoom handler is only bound in grid mode;
            # text tabs never receive it -- confirm whether intentional.
            self.editor.Bind( wx.EVT_MOUSEWHEEL, self.editor_OnMouseWheel )
        sizer = wx.BoxSizer()
        sizer.Add(self.editor, 1, wx.EXPAND)
        self.SetSizer(sizer)
    def editor_OnMouseWheel(self,event):
        """Ctrl+wheel zoom: wheel-down shrinks the font, wheel-up grows it.

        NOTE(review): shrink step is 2 (floored at 5) while grow step is 1
        -- asymmetric; confirm whether intentional.
        """
        min_font_size = 5
        interval_step = 2
        # Only act while Ctrl is held; otherwise let the event pass.
        if event.ControlDown():
            pass
        else:
            return
        if event.GetWheelRotation() < 0:
            if self.font_size>min_font_size:
                self.font_size-=interval_step
        else:
            self.font_size+=1
        if self.type in ['text']:
            f =self.editor.GetFont()
            f.PointSize= self.font_size
            self.editor.SetFont(f)
        else:
            # Grid has per-cell fonts: update every cell.
            col = self.editor.GetNumberCols()
            row = self.editor.GetNumberRows()
            for c in range(0, col):
                for r in range (0, row):
                    f = self.editor.GetCellFont(r, c)
                    f.PointSize = self.font_size
                    self.editor.SetCellFont(r, c, f)
        self.Refresh()
        #wx.StaticText(self, -1, "THIS IS A PAGE OBJECT", (20,20))
#DONE: DasHFrame should handle CLOSE event when closing the app, call on_close_tab_in_edit_area for all opened sessions and files
from functools import wraps
import pprint
def gui_event_thread_handler( func):
    """Decorator for GUI event handlers: run ``func``, log any exception.

    Swallows handler exceptions (logging the traceback via ``error``) so
    a buggy handler cannot kill the wx event loop. Returns ``func``'s
    result, or None when an exception was caught.
    """
    @wraps(func)
    def inner(*args, **kwargs):
        # BUG FIX: the original signature was ``inner(func, *args, **kwargs)``,
        # which shadowed the decorated function with the caller's first
        # positional argument and called *that* instead of ``func``.
        ret = None
        try:
            ret = func(*args, **kwargs)
            #th = threading.Thread(target=func,args= args, kwargs=kwargs)
            #th.start()
        except Exception:
            error(traceback.format_exc())
        return ret
    return inner
class gui_event_decorator():
    """Mixin providing a decorator that runs GUI event handlers on a
    background thread, so long-running handlers do not block the UI."""
    def __init__(self):
        pass

    @classmethod
    def gui_even_handle(cls, func):
        # (Name kept as-is, including the typo -- callers reference
        # ``gui_even_handle``.)
        def inner(*args, **kwargs):
            try:
                threading.Thread(target=func, args=args, kwargs=kwargs).start()
            except Exception:
                # Narrowed from a bare ``except:`` so KeyboardInterrupt /
                # SystemExit are no longer swallowed.
                print(traceback.format_exc())
            # The handler runs asynchronously; there is no result to return.
            return None
        return inner
class DasHFrame(MainFrame, gui_event_decorator):#wx.Frame
ini_setting = None
#m_left_navigator =None
redir = None
edit_area=None
tabs_in_edit_area = None
src_path = './src/'
sessions_alive=None
sequence_queue=None
history_cmd = []
history_cmd_index = -1
import_modules={'TC':''}
lib_path ='./lib'
log_path = '../log/dash'
session_path = './sessions'
suite_path = '../test_suite'
dict_test_report= ''
alive =True
mail_server=None
mail_to_list=None
mail_from=None
mail_read_url= 'outlook.office365.com'
mail_password = <PASSWORD>
mail_user ='<EMAIL>'
case_queue =None
check_case_running_status_lock = None
case_list=None
#session_names={}
web_daemon = None
web_host = None
web_port = 8888
mailed_case_pids= []
timestamp=None
mail_failure =False
last_time_call_on_idle= None
ini_file=None
dict_function_obj= {'instance':{}}
dict_function_files = {}
updating_function_page =False
m_log_current_pos = None
def __init__(self,parent=None, ini_file = './gDasH.ini'):
#wx.Frame.__init__(self, None, title="DasH")
gui_event_decorator.__init__(self)
self.timestamp= datetime.now().isoformat('-').replace(':','-')
self.case_list= []
self.case_queue = Queue.Queue()
self.dict_test_report={}
self.check_case_running_status_lock = threading.Lock()
self.tabs_in_edit_area=[]
self.sessions_alive={}
MainFrame.__init__(self, parent=parent)
self.sequence_queue= Queue.Queue()
#self.sequence_queue.put()
self.ini_setting = ConfigParser.ConfigParser()
self.m_log_current_pos = 0
if os.path.exists(ini_file):
self.ini_setting.read(ini_file)
self.src_path = os.path.abspath(self.ini_setting.get('dash','src_path'))
self.lib_path = os.path.abspath(self.ini_setting.get('dash','lib_path'))
self.log_path = os.path.abspath(self.ini_setting.get('dash','log_path'))
self.suite_path = os.path.abspath(self.ini_setting.get('dash', 'test_suite_path'))
self.mail_server = self.ini_setting.get('dash', 'mail_server')
self.mail_from =self.ini_setting.get('dash', 'mail_from')
self.mail_to_list =self.ini_setting.get('dash', 'mail_to_list')
self.mail_read_url =self.ini_setting.get('dash', 'mail_read_url')
self.mail_user = self.ini_setting.get('dash','mail_user')
self.mail_password =self.ini_setting.get('dash', 'mail_password')
self.web_port =int(self.ini_setting.get('dash', 'web_port'))
else:
with open(ini_file, 'w') as tmp_ini_file:
tmp_ini_file.write('''[dash]
test_suite_path = ../test_suite/
log_path= {log_path}
lib_path = {lib_path}
session_path={session_path}
#the source python file folder
src_path = {src_path}
mail_server={mail_server}
mail_to_list={mail_to_list}
mail_user={mail_user}
mail_from ={mail_from}
mail_read_url={mail_read_url}
mail_password = {<PASSWORD>}
web_port={web_port}
'''.format(
log_path = self.log_path,
lib_path = self.lib_path,
session_path = self.session_path,
src_path = self.src_path,
mail_server = self.mail_server,
mail_to_list = self.mail_to_list,
mail_user = self.mail_user,
mail_from = self.mail_from,
mail_read_url = self.mail_read_url,
mail_password = self.mail_password,
web_port = self.web_port))
tmp_ini_file.flush()
#self.ini_setting.read(ini_file)
self.ini_file = ini_file
from lib.common import create_case_folder, create_dir
sys.argv.append('-l')
sys.argv.append('{}'.format(self.log_path))
from lib.common import create_dir
self.log_path = create_dir(self.log_path)
self.suite_path = create_dir(self.suite_path)
self.lib_path = create_dir(self.lib_path)
self.src_path = create_dir(self.src_path)
if not os.path.exists(self.log_path):
os.mkdir(self.log_path)
self.add_src_path_to_python_path(self.src_path)
self.redir = RedirectText(self.m_log, self.log_path)
sys.stdout = self.redir
sys.stderr = self.redir
self.m_log.SetBackgroundColour('Black')
self.m_log.SetDefaultStyle(wx.TextAttr(wx.GREEN, wx.BLACK, font =wx.Font(9, family = wx.DEFAULT, style = wx.NORMAL, weight = wx.BOLD, faceName = 'Consolas')))
#self.m_editor.WriteText('welcome to dash world')
self.m_log.WriteText('Welcome to DasH!\n')
self.m_command_box.WriteText('functions.static_function_in_module test_ssh 2')
fileMenu = wx.Menu()
#open_test_suite = fileMenu.Append(wx.NewId(), "Open TestSuite", "Open a Test Suite")
#open_test_case = fileMenu.Append(wx.NewId(), "Open TestCase", "Open a Test Case")
generate_test_report = fileMenu.Append(wx.NewId(), "Generate Test Report", "Generate Test Report")
generate_code = fileMenu.Append(wx.NewId(), "Generate Python Code", "Generate Python Code")
mail_test_report = fileMenu.Append(wx.NewId(), "Mail Test Report", "Mail Test Report")
get_case_queue = fileMenu.Append(wx.NewId(), "Get Case Queue", "Get Case Queue") #done
clear_case_queue = fileMenu.Append(wx.NewId(), "Clear Case Queue", "Clear Case Queue")
kill_running_case = fileMenu.Append(wx.NewId(), "Kill Running Case(s)", "Kill Running Case(s)")
self.m_menubar_main.Append(fileMenu, "&Operations")
self.Bind(wx.EVT_MENU,self.on_generate_test_report ,generate_test_report)
self.Bind(wx.EVT_MENU,self.on_generate_code ,generate_code)
self.Bind(wx.EVT_MENU,self.on_mail_test_report ,mail_test_report)
self.Bind(wx.EVT_MENU,self.get_case_queue ,get_case_queue)
self.Bind(wx.EVT_MENU,self.on_clear_case_queue ,clear_case_queue)
self.Bind(wx.EVT_MENU,self.on_kill_running_case ,kill_running_case)
self.Bind(wx.EVT_CLOSE, self.on_close)
self.m_command_box.Bind(wx.EVT_TEXT_ENTER, self.on_command_enter)
self.m_command_box.Bind(wx.EVT_KEY_UP, self.on_key_up)
self.m_command_box.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
self.m_log.Bind(wx.EVT_TEXT, self.on_m_log_text_changed)
from wx.aui import AuiNotebook
bookStyle = wx.aui.AUI_NB_DEFAULT_STYLE &(~wx.aui.AUI_NB_CLOSE_ON_ACTIVE_TAB)
self.navigator = AuiNotebook(self.m_left_navigator, style= bookStyle )
self.case_suite_page = wx.TreeCtrl(self.navigator, wx.ID_ANY, wx.DefaultPosition, wx.Size(-1, -1), wx.TR_DEFAULT_STYLE | wx.TR_EDIT_LABELS | wx.TR_EXTENDED | wx.TR_HAS_BUTTONS | wx.TR_HAS_VARIABLE_ROW_HEIGHT | wx.HSCROLL | wx.TAB_TRAVERSAL | wx.VSCROLL | wx.WANTS_CHARS)
self.function_page = wx.TreeCtrl(self.navigator, | |
= None
self.success = None
self.error = None
# Fully-qualified Java class name, used as the wire-format message type id.
APIREFRESHLOADBALANCERMSG_FULL_NAME = 'org.zstack.network.service.lb.APIRefreshLoadBalancerMsg'
class APIRefreshLoadBalancerMsg(object):
    """Auto-generated request stub for APIRefreshLoadBalancerMsg.

    ``NotNoneField()`` marks fields the generator flagged '#mandatory
    field'; ``None``/``OptionalList()`` fields may be left unset.
    """
    FULL_NAME='org.zstack.network.service.lb.APIRefreshLoadBalancerMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        # Common API envelope fields (session/timeout/tags).
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
# Fully-qualified Java class name, used as the wire-format message type id.
APIREMOVEVMNICFROMLOADBALANCERMSG_FULL_NAME = 'org.zstack.network.service.lb.APIRemoveVmNicFromLoadBalancerMsg'
class APIRemoveVmNicFromLoadBalancerMsg(object):
    """Auto-generated request stub; NotNoneField()/NotNoneList() fields are mandatory."""
    FULL_NAME='org.zstack.network.service.lb.APIRemoveVmNicFromLoadBalancerMsg'
    def __init__(self):
        #mandatory field
        self.vmNicUuids = NotNoneList()
        #mandatory field
        self.listenerUuid = NotNoneField()
        # Common API envelope fields (session/timeout/tags).
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
# Fully-qualified Java class name, used as the wire-format message type id.
APIUPDATELOADBALANCERLISTENERMSG_FULL_NAME = 'org.zstack.network.service.lb.APIUpdateLoadBalancerListenerMsg'
class APIUpdateLoadBalancerListenerMsg(object):
    """Auto-generated request stub; NotNoneField() fields are mandatory."""
    FULL_NAME='org.zstack.network.service.lb.APIUpdateLoadBalancerListenerMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        self.name = None
        self.description = None
        # Common API envelope fields (session/timeout/tags).
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
# Fully-qualified Java class name, used as the wire-format message type id.
APIUPDATELOADBALANCERMSG_FULL_NAME = 'org.zstack.network.service.lb.APIUpdateLoadBalancerMsg'
class APIUpdateLoadBalancerMsg(object):
    """Auto-generated request stub; NotNoneField() fields are mandatory."""
    FULL_NAME='org.zstack.network.service.lb.APIUpdateLoadBalancerMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        self.name = None
        self.description = None
        self.resourceUuid = None
        # Common API envelope fields (session/timeout/tags).
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
# Fully-qualified Java class name, used as the wire-format message type id.
APIATTACHPORTFORWARDINGRULEMSG_FULL_NAME = 'org.zstack.network.service.portforwarding.APIAttachPortForwardingRuleMsg'
class APIAttachPortForwardingRuleMsg(object):
    """Auto-generated request stub; NotNoneField() fields are mandatory."""
    FULL_NAME='org.zstack.network.service.portforwarding.APIAttachPortForwardingRuleMsg'
    def __init__(self):
        #mandatory field
        self.ruleUuid = NotNoneField()
        #mandatory field
        self.vmNicUuid = NotNoneField()
        # Common API envelope fields (session/timeout/tags).
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
# Fully-qualified Java class name, used as the wire-format message type id.
APICHANGEPORTFORWARDINGRULESTATEMSG_FULL_NAME = 'org.zstack.network.service.portforwarding.APIChangePortForwardingRuleStateMsg'
class APIChangePortForwardingRuleStateMsg(object):
    """Auto-generated request stub; NotNoneField() fields are mandatory."""
    FULL_NAME='org.zstack.network.service.portforwarding.APIChangePortForwardingRuleStateMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        #mandatory field
        #valid values: [enable, disable]
        self.stateEvent = NotNoneField()
        # Common API envelope fields (session/timeout/tags).
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
# Fully-qualified Java class name, used as the wire-format message type id.
APICREATEPORTFORWARDINGRULEMSG_FULL_NAME = 'org.zstack.network.service.portforwarding.APICreatePortForwardingRuleMsg'
class APICreatePortForwardingRuleMsg(object):
    """Auto-generated request stub; NotNoneField() fields are mandatory."""
    FULL_NAME='org.zstack.network.service.portforwarding.APICreatePortForwardingRuleMsg'
    def __init__(self):
        #mandatory field
        self.vipUuid = NotNoneField()
        #mandatory field
        self.vipPortStart = NotNoneField()
        # Optional port-range ends; None presumably means single-port rule.
        self.vipPortEnd = None
        self.privatePortStart = None
        self.privatePortEnd = None
        #mandatory field
        #valid values: [TCP, UDP]
        self.protocolType = NotNoneField()
        self.vmNicUuid = None
        self.allowedCidr = None
        #mandatory field
        self.name = NotNoneField()
        self.description = None
        self.resourceUuid = None
        # Common API envelope fields (session/timeout/tags).
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
# Fully-qualified Java class name, used as the wire-format message type id.
APIDELETEPORTFORWARDINGRULEMSG_FULL_NAME = 'org.zstack.network.service.portforwarding.APIDeletePortForwardingRuleMsg'
class APIDeletePortForwardingRuleMsg(object):
    """Auto-generated request stub; NotNoneField() fields are mandatory."""
    FULL_NAME='org.zstack.network.service.portforwarding.APIDeletePortForwardingRuleMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        self.deleteMode = None
        # Common API envelope fields (session/timeout/tags).
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
# Fully-qualified Java class name, used as the wire-format message type id.
APIDETACHPORTFORWARDINGRULEMSG_FULL_NAME = 'org.zstack.network.service.portforwarding.APIDetachPortForwardingRuleMsg'
class APIDetachPortForwardingRuleMsg(object):
    """Auto-generated request stub; NotNoneField() fields are mandatory."""
    FULL_NAME='org.zstack.network.service.portforwarding.APIDetachPortForwardingRuleMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        # Common API envelope fields (session/timeout/tags).
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
# Fully-qualified Java class name, used as the wire-format message type id.
APIGETPORTFORWARDINGATTACHABLEVMNICSMSG_FULL_NAME = 'org.zstack.network.service.portforwarding.APIGetPortForwardingAttachableVmNicsMsg'
class APIGetPortForwardingAttachableVmNicsMsg(object):
    """Auto-generated request stub; NotNoneField() fields are mandatory."""
    FULL_NAME='org.zstack.network.service.portforwarding.APIGetPortForwardingAttachableVmNicsMsg'
    def __init__(self):
        #mandatory field
        self.ruleUuid = NotNoneField()
        # Common API envelope fields (session/timeout/tags).
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
# Fully-qualified Java class name, used as the wire-format message type id.
APIGETPORTFORWARDINGATTACHABLEVMNICSREPLY_FULL_NAME = 'org.zstack.network.service.portforwarding.APIGetPortForwardingAttachableVmNicsReply'
class APIGetPortForwardingAttachableVmNicsReply(object):
    """Auto-generated reply stub; ``success``/``error`` carry the call outcome."""
    FULL_NAME='org.zstack.network.service.portforwarding.APIGetPortForwardingAttachableVmNicsReply'
    def __init__(self):
        self.inventories = OptionalList()
        self.success = None
        self.error = None
# Fully-qualified Java class name, used as the wire-format message type id.
APILISTPORTFORWARDINGRULEREPLY_FULL_NAME = 'org.zstack.network.service.portforwarding.APIListPortForwardingRuleReply'
class APIListPortForwardingRuleReply(object):
    """Auto-generated reply stub; ``success``/``error`` carry the call outcome."""
    FULL_NAME='org.zstack.network.service.portforwarding.APIListPortForwardingRuleReply'
    def __init__(self):
        self.inventories = OptionalList()
        self.success = None
        self.error = None
APIQUERYPORTFORWARDINGRULEMSG_FULL_NAME = 'org.zstack.network.service.portforwarding.APIQueryPortForwardingRuleMsg'
class APIQueryPortForwardingRuleMsg(object):
    """Query port forwarding rules; `conditions` mandatory, paging/sorting optional."""
    FULL_NAME='org.zstack.network.service.portforwarding.APIQueryPortForwardingRuleMsg'
    def __init__(self):
        #mandatory field
        self.conditions = NotNoneList()
        self.limit = None
        self.start = None
        self.count = None
        self.groupBy = None
        self.replyWithCount = None
        self.sortBy = None
        #valid values: [asc, desc]
        self.sortDirection = None
        self.fields = OptionalList()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIQUERYPORTFORWARDINGRULEREPLY_FULL_NAME = 'org.zstack.network.service.portforwarding.APIQueryPortForwardingRuleReply'
class APIQueryPortForwardingRuleReply(object):
    """Reply: query results (`inventories`, `total`) plus success/error status."""
    FULL_NAME='org.zstack.network.service.portforwarding.APIQueryPortForwardingRuleReply'
    def __init__(self):
        self.inventories = OptionalList()
        self.total = None
        self.success = None
        self.error = None
APIUPDATEPORTFORWARDINGRULEMSG_FULL_NAME = 'org.zstack.network.service.portforwarding.APIUpdatePortForwardingRuleMsg'
class APIUpdatePortForwardingRuleMsg(object):
    """Update a port forwarding rule's name/description (`uuid` mandatory)."""
    FULL_NAME='org.zstack.network.service.portforwarding.APIUpdatePortForwardingRuleMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        self.name = None
        self.description = None
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APICHANGEVIPSTATEMSG_FULL_NAME = 'org.zstack.network.service.vip.APIChangeVipStateMsg'
class APIChangeVipStateMsg(object):
    """Enable/disable a VIP (`uuid` and `stateEvent` mandatory)."""
    FULL_NAME='org.zstack.network.service.vip.APIChangeVipStateMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        #mandatory field
        #valid values: [enable, disable]
        self.stateEvent = NotNoneField()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APICREATEVIPMSG_FULL_NAME = 'org.zstack.network.service.vip.APICreateVipMsg'
class APICreateVipMsg(object):
    """Create a VIP on an L3 network (`name`, `l3NetworkUuid` mandatory)."""
    FULL_NAME='org.zstack.network.service.vip.APICreateVipMsg'
    def __init__(self):
        #mandatory field
        self.name = NotNoneField()
        self.description = None
        #mandatory field
        self.l3NetworkUuid = NotNoneField()
        self.allocatorStrategy = None
        self.requiredIp = None
        self.resourceUuid = None
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIDELETEVIPMSG_FULL_NAME = 'org.zstack.network.service.vip.APIDeleteVipMsg'
class APIDeleteVipMsg(object):
    """Delete a VIP (`uuid` mandatory)."""
    FULL_NAME='org.zstack.network.service.vip.APIDeleteVipMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        self.deleteMode = None
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIQUERYVIPMSG_FULL_NAME = 'org.zstack.network.service.vip.APIQueryVipMsg'
class APIQueryVipMsg(object):
    """Query VIPs; `conditions` mandatory, paging/sorting optional."""
    FULL_NAME='org.zstack.network.service.vip.APIQueryVipMsg'
    def __init__(self):
        #mandatory field
        self.conditions = NotNoneList()
        self.limit = None
        self.start = None
        self.count = None
        self.groupBy = None
        self.replyWithCount = None
        self.sortBy = None
        #valid values: [asc, desc]
        self.sortDirection = None
        self.fields = OptionalList()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIQUERYVIPREPLY_FULL_NAME = 'org.zstack.network.service.vip.APIQueryVipReply'
class APIQueryVipReply(object):
    """Reply: VIP query results (`inventories`, `total`) plus success/error status."""
    FULL_NAME='org.zstack.network.service.vip.APIQueryVipReply'
    def __init__(self):
        self.inventories = OptionalList()
        self.total = None
        self.success = None
        self.error = None
APIUPDATEVIPMSG_FULL_NAME = 'org.zstack.network.service.vip.APIUpdateVipMsg'
class APIUpdateVipMsg(object):
    """Update a VIP's name/description (`uuid` mandatory)."""
    FULL_NAME='org.zstack.network.service.vip.APIUpdateVipMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        self.name = None
        self.description = None
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APICREATEVIRTUALROUTEROFFERINGMSG_FULL_NAME = 'org.zstack.network.service.virtualrouter.APICreateVirtualRouterOfferingMsg'
class APICreateVirtualRouterOfferingMsg(object):
    """Create a virtual router offering (zone/management-network/image uuids, name, cpuNum, memorySize mandatory)."""
    FULL_NAME='org.zstack.network.service.virtualrouter.APICreateVirtualRouterOfferingMsg'
    def __init__(self):
        #mandatory field
        self.zoneUuid = NotNoneField()
        #mandatory field
        self.managementNetworkUuid = NotNoneField()
        #mandatory field
        self.imageUuid = NotNoneField()
        self.publicNetworkUuid = None
        self.isDefault = None
        #mandatory field
        self.name = NotNoneField()
        self.description = None
        #mandatory field
        self.cpuNum = NotNoneField()
        #mandatory field
        self.memorySize = NotNoneField()
        self.allocatorStrategy = None
        self.sortKey = None
        self.type = None
        self.resourceUuid = None
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APICREATEVIRTUALROUTERVMMSG_FULL_NAME = 'org.zstack.network.service.virtualrouter.APICreateVirtualRouterVmMsg'
class APICreateVirtualRouterVmMsg(object):
    """Create a virtual router VM; network/offering/image uuids, name, services and l3NetworkUuids mandatory."""
    FULL_NAME='org.zstack.network.service.virtualrouter.APICreateVirtualRouterVmMsg'
    def __init__(self):
        #mandatory field
        self.managementNetworkUuid = NotNoneField()
        #mandatory field
        self.publicNetworkUuid = NotNoneField()
        #mandatory field
        self.networkServicesProvided = NotNoneList()
        #mandatory field
        self.name = NotNoneField()
        #mandatory field
        self.instanceOfferingUuid = NotNoneField()
        #mandatory field
        self.imageUuid = NotNoneField()
        #mandatory field
        self.l3NetworkUuids = NotNoneList()
        #valid values: [UserVm, ApplianceVm]
        self.type = None
        self.rootDiskOfferingUuid = None
        self.dataDiskOfferingUuids = OptionalList()
        self.zoneUuid = None
        self.clusterUuid = None
        self.hostUuid = None
        self.primaryStorageUuidForRootVolume = None
        self.description = None
        self.defaultL3NetworkUuid = None
        #valid values: [InstantStart, JustCreate]
        self.strategy = None
        self.resourceUuid = None
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIGETATTACHABLEPUBLICL3FORVROUTERMSG_FULL_NAME = 'org.zstack.network.service.virtualrouter.APIGetAttachablePublicL3ForVRouterMsg'
class APIGetAttachablePublicL3ForVRouterMsg(object):
    """List attachable public L3 networks for a virtual router (`vmInstanceUuid` mandatory)."""
    FULL_NAME='org.zstack.network.service.virtualrouter.APIGetAttachablePublicL3ForVRouterMsg'
    def __init__(self):
        #mandatory field
        self.vmInstanceUuid = NotNoneField()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIGETATTACHABLEPUBLICL3FORVROUTERREPLY_FULL_NAME = 'org.zstack.network.service.virtualrouter.APIGetAttachablePublicL3ForVRouterReply'
class APIGetAttachablePublicL3ForVRouterReply(object):
    """Reply: attachable public L3 `inventories` plus success/error status."""
    FULL_NAME='org.zstack.network.service.virtualrouter.APIGetAttachablePublicL3ForVRouterReply'
    def __init__(self):
        self.inventories = OptionalList()
        self.success = None
        self.error = None
APIGETVIPUSEDPORTSMSG_FULL_NAME = 'org.zstack.network.service.virtualrouter.APIGetVipUsedPortsMsg'
class APIGetVipUsedPortsMsg(object):
    """List ports in use by a VIP for a protocol (`uuid`, `protocol` mandatory)."""
    FULL_NAME='org.zstack.network.service.virtualrouter.APIGetVipUsedPortsMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        #mandatory field
        #valid values: [TCP, UDP]
        self.protocol = NotNoneField()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIGETVIPUSEDPORTSREPLY_FULL_NAME = 'org.zstack.network.service.virtualrouter.APIGetVipUsedPortsReply'
class APIGetVipUsedPortsReply(object):
    """Reply: used-port `inventories` plus success/error status."""
    FULL_NAME='org.zstack.network.service.virtualrouter.APIGetVipUsedPortsReply'
    def __init__(self):
        self.inventories = OptionalList()
        self.success = None
        self.error = None
APIGETVIRTUALROUTEROFFERINGREPLY_FULL_NAME = 'org.zstack.network.service.virtualrouter.APIGetVirtualRouterOfferingReply'
class APIGetVirtualRouterOfferingReply(object):
    """Reply: a single virtual router offering `inventory` plus success/error status."""
    FULL_NAME='org.zstack.network.service.virtualrouter.APIGetVirtualRouterOfferingReply'
    def __init__(self):
        self.inventory = None
        self.success = None
        self.error = None
APIQUERYVIRTUALROUTEROFFERINGMSG_FULL_NAME = 'org.zstack.network.service.virtualrouter.APIQueryVirtualRouterOfferingMsg'
class APIQueryVirtualRouterOfferingMsg(object):
    """Query virtual router offerings; `conditions` mandatory, paging/sorting optional."""
    FULL_NAME='org.zstack.network.service.virtualrouter.APIQueryVirtualRouterOfferingMsg'
    def __init__(self):
        #mandatory field
        self.conditions = NotNoneList()
        self.limit = None
        self.start = None
        self.count = None
        self.groupBy = None
        self.replyWithCount = None
        self.sortBy = None
        #valid values: [asc, desc]
        self.sortDirection = None
        self.fields = OptionalList()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIQUERYVIRTUALROUTEROFFERINGREPLY_FULL_NAME = 'org.zstack.network.service.virtualrouter.APIQueryVirtualRouterOfferingReply'
class APIQueryVirtualRouterOfferingReply(object):
    """Reply: offering query results (`inventories`, `total`) plus success/error status."""
    FULL_NAME='org.zstack.network.service.virtualrouter.APIQueryVirtualRouterOfferingReply'
    def __init__(self):
        self.inventories = OptionalList()
        self.total = None
        self.success = None
        self.error = None
APIQUERYVIRTUALROUTERVMMSG_FULL_NAME = 'org.zstack.network.service.virtualrouter.APIQueryVirtualRouterVmMsg'
class APIQueryVirtualRouterVmMsg(object):
    """Query virtual router VMs; `conditions` mandatory, paging/sorting optional."""
    FULL_NAME='org.zstack.network.service.virtualrouter.APIQueryVirtualRouterVmMsg'
    def __init__(self):
        #mandatory field
        self.conditions = NotNoneList()
        self.limit = None
        self.start = None
        self.count = None
        self.groupBy = None
        self.replyWithCount = None
        self.sortBy = None
        #valid values: [asc, desc]
        self.sortDirection = None
        self.fields = OptionalList()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIQUERYVIRTUALROUTERVMREPLY_FULL_NAME = 'org.zstack.network.service.virtualrouter.APIQueryVirtualRouterVmReply'
class APIQueryVirtualRouterVmReply(object):
    """Reply: virtual router VM query results (`inventories`, `total`) plus success/error status."""
    FULL_NAME='org.zstack.network.service.virtualrouter.APIQueryVirtualRouterVmReply'
    def __init__(self):
        self.inventories = OptionalList()
        self.total = None
        self.success = None
        self.error = None
APIRECONNECTVIRTUALROUTERMSG_FULL_NAME = 'org.zstack.network.service.virtualrouter.APIReconnectVirtualRouterMsg'
class APIReconnectVirtualRouterMsg(object):
    """Reconnect a virtual router VM (`vmInstanceUuid` mandatory)."""
    FULL_NAME='org.zstack.network.service.virtualrouter.APIReconnectVirtualRouterMsg'
    def __init__(self):
        #mandatory field
        self.vmInstanceUuid = NotNoneField()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APISEARCHVIRTUALROUTEROFFINGREPLY_FULL_NAME = 'org.zstack.network.service.virtualrouter.APISearchVirtualRouterOffingReply'
class APISearchVirtualRouterOffingReply(object):
    """Reply: search result `content` plus success/error status."""
    FULL_NAME='org.zstack.network.service.virtualrouter.APISearchVirtualRouterOffingReply'
    def __init__(self):
        self.content = None
        self.success = None
        self.error = None
APISEARCHVIRTUALROUTERVMREPLY_FULL_NAME = 'org.zstack.network.service.virtualrouter.APISearchVirtualRouterVmReply'
class APISearchVirtualRouterVmReply(object):
    """Reply: search result `content` plus success/error status."""
    FULL_NAME='org.zstack.network.service.virtualrouter.APISearchVirtualRouterVmReply'
    def __init__(self):
        self.content = None
        self.success = None
        self.error = None
APIUPDATEVIRTUALROUTEROFFERINGMSG_FULL_NAME = 'org.zstack.network.service.virtualrouter.APIUpdateVirtualRouterOfferingMsg'
class APIUpdateVirtualRouterOfferingMsg(object):
    """Update a virtual router offering (`uuid` mandatory)."""
    FULL_NAME='org.zstack.network.service.virtualrouter.APIUpdateVirtualRouterOfferingMsg'
    def __init__(self):
        self.isDefault = None
        self.imageUuid = None
        #mandatory field
        self.uuid = NotNoneField()
        self.name = None
        self.description = None
        self.allocatorStrategy = None
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIATTACHPCIDEVICETOVMMSG_FULL_NAME = 'org.zstack.pciDevice.APIAttachPciDeviceToVmMsg'
class APIAttachPciDeviceToVmMsg(object):
    """Attach a PCI device to a VM (`pciDeviceUuid`, `vmInstanceUuid` mandatory)."""
    FULL_NAME='org.zstack.pciDevice.APIAttachPciDeviceToVmMsg'
    def __init__(self):
        #mandatory field
        self.pciDeviceUuid = NotNoneField()
        #mandatory field
        self.vmInstanceUuid = NotNoneField()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APICREATEPCIDEVICEOFFERINGMSG_FULL_NAME = 'org.zstack.pciDevice.APICreatePciDeviceOfferingMsg'
class APICreatePciDeviceOfferingMsg(object):
    """Create a PCI device offering (`name`, `vendorId`, `deviceId` mandatory)."""
    FULL_NAME='org.zstack.pciDevice.APICreatePciDeviceOfferingMsg'
    def __init__(self):
        #mandatory field
        self.name = NotNoneField()
        self.description = None
        self.type = None
        #mandatory field
        self.vendorId = NotNoneField()
        #mandatory field
        self.deviceId = NotNoneField()
        self.subvendorId = None
        self.subdeviceId = None
        self.resourceUuid = None
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIDELETEPCIDEVICEMSG_FULL_NAME = 'org.zstack.pciDevice.APIDeletePciDeviceMsg'
class APIDeletePciDeviceMsg(object):
    """Delete a PCI device (`uuid` mandatory)."""
    FULL_NAME='org.zstack.pciDevice.APIDeletePciDeviceMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        self.deleteMode = None
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIDELETEPCIDEVICEOFFERINGMSG_FULL_NAME = 'org.zstack.pciDevice.APIDeletePciDeviceOfferingMsg'
class APIDeletePciDeviceOfferingMsg(object):
    """Delete a PCI device offering (`uuid` mandatory)."""
    FULL_NAME='org.zstack.pciDevice.APIDeletePciDeviceOfferingMsg'
    def __init__(self):
        #mandatory field
        self.uuid = NotNoneField()
        self.session = None
        self.timeout = None
        self.systemTags = OptionalList()
        self.userTags = OptionalList()
APIDETACHPCIDEVICEFROMVMMSG_FULL_NAME = 'org.zstack.pciDevice.APIDetachPciDeviceFromVmMsg'
class APIDetachPciDeviceFromVmMsg(object):
FULL_NAME='org.zstack.pciDevice.APIDetachPciDeviceFromVmMsg'
def __init__(self):
#mandatory field
self.pciDeviceUuid = NotNoneField()
#mandatory field
self.vmInstanceUuid = NotNoneField()
self.session = None
self.timeout = None
self.systemTags = | |
optional: Optional[bool] = None):
if key is not None:
pulumi.set(__self__, "key", key)
if name is not None:
pulumi.set(__self__, "name", name)
if optional is not None:
pulumi.set(__self__, "optional", optional)
    @property
    @pulumi.getter
    def key(self):
        # Resolved through pulumi's output-property accessor.
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def name(self):
        # Resolved through pulumi's output-property accessor.
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def optional(self):
        # Resolved through pulumi's output-property accessor.
        return pulumi.get(self, "optional")
    def _translate_property(self, prop):
        # Map a camelCase property name to its snake_case equivalent (fallback: unchanged).
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class StorageClusterSpecNodesEnvValueFromFieldRef(dict):
    """Env-var value source referencing an object field: `apiVersion` + `fieldPath`."""
    def __init__(__self__, *,
                 api_version: Optional[str] = None,
                 field_path: Optional[str] = None):
        if api_version is not None:
            pulumi.set(__self__, "api_version", api_version)
        if field_path is not None:
            pulumi.set(__self__, "field_path", field_path)
    @property
    @pulumi.getter(name="apiVersion")
    def api_version(self) -> Optional[str]:
        return pulumi.get(self, "api_version")
    @property
    @pulumi.getter(name="fieldPath")
    def field_path(self) -> Optional[str]:
        return pulumi.get(self, "field_path")
    def _translate_property(self, prop):
        # Map a camelCase property name to its snake_case equivalent (fallback: unchanged).
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class StorageClusterSpecNodesEnvValueFromResourceFieldRef(dict):
    """Env-var value source referencing a container resource field: `containerName`, `divisor`, `resource`."""
    def __init__(__self__, *,
                 container_name: Optional[str] = None,
                 divisor: Optional[str] = None,
                 resource: Optional[str] = None):
        if container_name is not None:
            pulumi.set(__self__, "container_name", container_name)
        if divisor is not None:
            pulumi.set(__self__, "divisor", divisor)
        if resource is not None:
            pulumi.set(__self__, "resource", resource)
    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[str]:
        return pulumi.get(self, "container_name")
    @property
    @pulumi.getter
    def divisor(self) -> Optional[str]:
        return pulumi.get(self, "divisor")
    @property
    @pulumi.getter
    def resource(self) -> Optional[str]:
        return pulumi.get(self, "resource")
    def _translate_property(self, prop):
        # Map a camelCase property name to its snake_case equivalent (fallback: unchanged).
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class StorageClusterSpecNodesEnvValueFromSecretKeyRef(dict):
    """Env-var value source selecting a key by name: `key`, `name`, `optional`."""
    def __init__(__self__, *,
                 key: Optional[str] = None,
                 name: Optional[str] = None,
                 optional: Optional[bool] = None):
        if key is not None:
            pulumi.set(__self__, "key", key)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if optional is not None:
            pulumi.set(__self__, "optional", optional)
    @property
    @pulumi.getter
    def key(self) -> Optional[str]:
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def optional(self) -> Optional[bool]:
        return pulumi.get(self, "optional")
    def _translate_property(self, prop):
        # Map a camelCase property name to its snake_case equivalent (fallback: unchanged).
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class StorageClusterSpecNodesNetwork(dict):
    """
    Contains network information that is needed by the storage driver.
    """
    def __init__(__self__, *,
                 data_interface: Optional[str] = None,
                 mgmt_interface: Optional[str] = None):
        """
        Contains network information that is needed by the storage driver.
        :param str data_interface: Name of the network interface used by the storage driver for data traffic.
        :param str mgmt_interface: Name of the network interface used by the storage driver for management traffic.
        """
        if data_interface is not None:
            pulumi.set(__self__, "data_interface", data_interface)
        if mgmt_interface is not None:
            pulumi.set(__self__, "mgmt_interface", mgmt_interface)
    @property
    @pulumi.getter(name="dataInterface")
    def data_interface(self) -> Optional[str]:
        """
        Name of the network interface used by the storage driver for data traffic.
        """
        return pulumi.get(self, "data_interface")
    @property
    @pulumi.getter(name="mgmtInterface")
    def mgmt_interface(self) -> Optional[str]:
        """
        Name of the network interface used by the storage driver for management traffic.
        """
        return pulumi.get(self, "mgmt_interface")
    def _translate_property(self, prop):
        # Map a camelCase property name to its snake_case equivalent (fallback: unchanged).
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class StorageClusterSpecNodesSelector(dict):
    """
    Configuration in this node block is applied to nodes based on this selector. Use either nodeName of labelSelector, not both. If nodeName is used, labelSelector will be ignored.
    """
    def __init__(__self__, *,
                 label_selector: Optional['outputs.StorageClusterSpecNodesSelectorLabelSelector'] = None,
                 node_name: Optional[str] = None):
        """
        Configuration in this node block is applied to nodes based on this selector. Use either nodeName of labelSelector, not both. If nodeName is used, labelSelector will be ignored.
        :param 'StorageClusterSpecNodesSelectorLabelSelectorArgs' label_selector: It is a label query over all the nodes. The result of matchLabels and matchExpressions is ANDed. An empty label selector matches all nodes. A null label selector matches no objects.
        :param str node_name: Name of the Kubernetes node that is to be selected. If present then the labelSelector is ignored even if the node with the given name is absent and the labelSelector matches another node.
        """
        if label_selector is not None:
            pulumi.set(__self__, "label_selector", label_selector)
        if node_name is not None:
            pulumi.set(__self__, "node_name", node_name)
    @property
    @pulumi.getter(name="labelSelector")
    def label_selector(self) -> Optional['outputs.StorageClusterSpecNodesSelectorLabelSelector']:
        """
        It is a label query over all the nodes. The result of matchLabels and matchExpressions is ANDed. An empty label selector matches all nodes. A null label selector matches no objects.
        """
        return pulumi.get(self, "label_selector")
    @property
    @pulumi.getter(name="nodeName")
    def node_name(self) -> Optional[str]:
        """
        Name of the Kubernetes node that is to be selected. If present then the labelSelector is ignored even if the node with the given name is absent and the labelSelector matches another node.
        """
        return pulumi.get(self, "node_name")
    def _translate_property(self, prop):
        # Map a camelCase property name to its snake_case equivalent (fallback: unchanged).
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class StorageClusterSpecNodesSelectorLabelSelector(dict):
    """
    It is a label query over all the nodes. The result of matchLabels and matchExpressions is ANDed. An empty label selector matches all nodes. A null label selector matches no objects.
    """
    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.StorageClusterSpecNodesSelectorLabelSelectorMatchExpressions']] = None,
                 match_labels: Optional[Mapping[str, Any]] = None):
        """
        It is a label query over all the nodes. The result of matchLabels and matchExpressions is ANDed. An empty label selector matches all nodes. A null label selector matches no objects.
        :param Sequence['StorageClusterSpecNodesSelectorLabelSelectorMatchExpressionsArgs'] match_expressions: It is a list of label selector requirements. The requirements are ANDed.
        :param Mapping[str, Any] match_labels: It is a map of key-value pairs. A single key-value in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
        """
        if match_expressions is not None:
            pulumi.set(__self__, "match_expressions", match_expressions)
        if match_labels is not None:
            pulumi.set(__self__, "match_labels", match_labels)
    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.StorageClusterSpecNodesSelectorLabelSelectorMatchExpressions']]:
        """
        It is a list of label selector requirements. The requirements are ANDed.
        """
        return pulumi.get(self, "match_expressions")
    @property
    @pulumi.getter(name="matchLabels")
    def match_labels(self) -> Optional[Mapping[str, Any]]:
        """
        It is a map of key-value pairs. A single key-value in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
        """
        return pulumi.get(self, "match_labels")
    def _translate_property(self, prop):
        # Map a camelCase property name to its snake_case equivalent (fallback: unchanged).
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class StorageClusterSpecNodesSelectorLabelSelectorMatchExpressions(dict):
    """A single label selector requirement: `key`, `operator`, `values`."""
    def __init__(__self__, *,
                 key: Optional[str] = None,
                 operator: Optional[str] = None,
                 values: Optional[Sequence[str]] = None):
        """
        :param str key: It is the label key that the selector applies to.
        :param str operator: It represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
        :param Sequence[str] values: It is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty.
        """
        if key is not None:
            pulumi.set(__self__, "key", key)
        if operator is not None:
            pulumi.set(__self__, "operator", operator)
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def key(self) -> Optional[str]:
        """
        It is the label key that the selector applies to.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def operator(self) -> Optional[str]:
        """
        It represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
        """
        return pulumi.get(self, "operator")
    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        It is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty.
        """
        return pulumi.get(self, "values")
    def _translate_property(self, prop):
        # Map a camelCase property name to its snake_case equivalent (fallback: unchanged).
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class StorageClusterSpecNodesStorage(dict):
"""
Details of the storage used by the storage driver.
"""
def __init__(__self__, *,
devices: Optional[Sequence[str]] = None,
force_use_disks: Optional[bool] = None,
journal_device: Optional[str] = None,
kvdb_device: Optional[str] = None,
system_metadata_device: Optional[str] = None,
use_all: Optional[bool] = None,
use_all_with_partitions: Optional[bool] = None):
"""
Details of the storage used by the storage driver.
:param Sequence[str] devices: List of devices to be used by the storage driver.
:param bool force_use_disks: Flag indicating to use the devices even if there is file system present on it. Note that the devices may be wiped before using.
:param str journal_device: Device used for journaling.
:param str kvdb_device: Device used for internal KVDB.
:param str system_metadata_device: Device that will be used to store system metadata by the driver.
:param bool use_all: Use all available, unformatted, unpartitioned devices. This will be ignored if spec.storage.devices is not empty.
:param bool use_all_with_partitions: Use all available unformatted devices. This will be ignored if spec.storage.devices is not empty.
"""
if devices is not None:
pulumi.set(__self__, "devices", devices)
if force_use_disks is not None:
pulumi.set(__self__, "force_use_disks", force_use_disks)
if | |
Controller -> Radio
1: CLR
Radio -> Controller
1: CLR,OK
NOTE: This command is only acceptable in Programming Mode.
This command needs about 10 seconds execution time.
"""
self.device.serial.timeout = 10
response = self.device.command('CLR')
self.device.serial.timeout = self.device.timeout
return response
    @property
    def backlight(self):
        """
        Get Backlight
        Get Backlight Setting.
        Controller -> Radio
        1: BLT
        Radio -> Controller
        1: BLT,##
        ## : Backlight Setting (see BACKLIGHT class)
        NOTE: This command is only acceptable in Programming Mode.
        """
        # Returned as the raw response string (unlike battery_save, which is int()-converted).
        return self.device.command('BLT')
    @backlight.setter
    def backlight(self, value):
        """
        Set Backlight
        Set Backlight Setting.
        Controller -> Radio
        1: BLT,##
        ## : Backlight Setting (see BACKLIGHT class)
        Radio -> Controller
        1: BLT,OK
        NOTE: This command is only acceptable in Programming Mode.
        """
        # value: one of the BACKLIGHT constants (see docstring above).
        self.device.command('BLT', value)
    @property
    def battery_save(self):
        """
        Get Battery Save
        Get Battery Save Setting.
        Controller -> Radio
        1: BSV
        Radio -> Controller
        1: BSV,#
        # : Battery Save Setting (See TOGGLE class)
        NOTE: This command is only acceptable in Programming Mode.
        """
        # Single-digit TOGGLE value, converted to int.
        return int(self.device.command('BSV'))
    @battery_save.setter
    def battery_save(self, value):
        """
        Set Battery Save
        Set Battery Save Setting.
        Controller -> Radio
        1: BSV,#
        # : Battery Save Setting (See TOGGLE class)
        Radio -> Controller
        1: BSV,OK
        NOTE: This command is only acceptable in Programming Mode.
        """
        # value: a TOGGLE constant.
        self.device.command('BSV', value)
    @property
    def key_beep(self):
        """
        Get Key Beep
        Get Key Beep Setting.
        Controller -> Radio
        1: KBP
        Radio -> Controller
        1: KBP,#
        # : Key Beep Setting (See TOGGLE class)
        NOTE: This command is only acceptable in Programming Mode.
        """
        # Single-digit TOGGLE value, converted to int.
        return int(self.device.command('KBP'))
    @key_beep.setter
    def key_beep(self, value):
        """
        Set Key Beep
        Set Key Beep Setting.
        Controller -> Radio
        1: KBP,#
        # : Key Beep Setting (See TOGGLE class)
        Radio -> Controller
        1: KBP,OK
        NOTE: This command is only acceptable in Programming Mode.
        """
        # value: a TOGGLE constant.
        self.device.command('KBP', value)
    @property
    def opening_message(self):
        """
        Get Opening Message
        Controller -> Radio
        1: OMS
        Radio -> Controller
        1: OMS,[L1_CHAR],[L2_CHAR]
        [L1_CHAR] : Line1 Characters (max 16char)
        [L2_CHAR] : Line2 Characters (max 16char)
        NOTE: This command is only acceptable in Programming Mode.
        If only space code is set in character area, the command returns the default message.
        """
        # Response fields are exposed under the keys 'l1_char' / 'l2_char'.
        return self.device.command('OMS', keys=('l1_char', 'l2_char'))
@opening_message.setter
def opening_message(self, (l1_char, l2_char)):
"""
Set Opening Message
Controller -> Radio
1: OMS,[L1_CHAR],[L2_CHAR]
[L1_CHAR] : Line1 Characters (max 16char)
[L2_CHAR] : Line2 Characters (max 16char)
Radio -> Controller
1: OMS,OK
NOTE: This command is only acceptable in Programming Mode.
"""
self.device.command('OMS', l1_char, l2_char)
    @property
    def priority_mode(self):
        """
        Get Priority Mode
        Get Priority Mode Setting.
        Controller -> Radio
        1: PRI
        Radio -> Controller
        1: PRI,#
        # : Priority Mode Setting (See TOGGLE class)
        NOTE: This command is only acceptable in Programming Mode.
        """
        # Single-digit TOGGLE value, converted to int.
        return int(self.device.command('PRI'))
    @priority_mode.setter
    def priority_mode(self, value):
        """
        Set Priority Mode
        Set Priority Mode Setting.
        Controller -> Radio
        1: PRI,#
        # : Priority Mode Setting (See TOGGLE class)
        Radio -> Controller
        1: PRI,OK
        NOTE: This command is only acceptable in Programming Mode.
        """
        # value: a TOGGLE constant.
        self.device.command('PRI', value)
# Expose the module-level TOGGLE constants as Settings.TOGGLE for convenience.
Settings.TOGGLE = TOGGLE
class System(object):
    """A single stored system on the scanner device.

    Wraps the device's per-system serial commands. ``index`` is the
    device-side handle for the system; assigning a truthy index pulls the
    system's attributes from the device via ``device.systems.info``.
    """
    class SYS_TYPE:
        # System type codes used by the CSY/SIN commands.
        CONVENTIONAL = 'CNV'
        MOT_800_T2_STD = 'M82S'
        MOT_800_T2_SPL = 'M82P'
        MOT_900_T2 = 'M92'
        MOT_VHF_T2 = 'MV2'
        MOT_UHF_T2 = 'MU2'
        MOT_800_T1_STD = 'M81S'
        MOT_800_T1_SPL = 'M81P'
        EDACS_NARROW = 'EDN'
        EDACS_WIDE = 'EDW'
        EDACS_SCAT = 'EDS'
        LTR = 'LTR'
        MOT_800_T2_CUS = 'M82C'
        MOT_800_T1_CUS = 'M81C'
    class QUICK_KEY:
        # Quick-key digit values; NONE ('.') means no quick key assigned.
        ONE = 1
        TWO = 2
        THREE = 3
        FOUR = 4
        FIVE = 5
        SIX = 6
        SEVEN = 7
        EIGHT = 8
        NINE = 9
        TEN = 0
        NONE = '.'
    class LOUT:
        # Lockout flag values.
        UNLOCKED = 0
        LOCKED = 1
    class EMG:
        # Emergency-alert flag values.
        IGNORE = 0
        ALERT = 1
    def __init__(self, device, index=None, sys_type=SYS_TYPE.CONVENTIONAL, name=None, quick_key=None, hld=None, lout=None, att=None, dly=None, skp=None, emg=None):
        """Initialize System.

        NOTE(review): assigning ``index`` below may fetch attributes from the
        device, but the explicit assignments that follow overwrite any fetched
        values with the constructor arguments (mostly None) — confirm this
        ordering is intended.
        """
        self.device = device
        self.index = index
        self.sys_type = sys_type
        self.name = name
        self.quick_key = quick_key
        self.hld = hld
        self.lout = lout
        self.att = att
        self.dly = dly
        self.skp = skp
        self.emg = emg
        # Device-maintained linkage/bookkeeping fields; -1 until fetched.
        self.rev_index = -1
        self.fwd_index = -1
        self.chn_grp_head = -1
        self.chn_grp_tail = -1
        self.seq_no = -1
    @property
    def index(self):
        """System index as an int; -1 when no index has been assigned."""
        # Bug fix: the old `int(getattr(self, '_index', -1))` never used the
        # -1 default, because __init__ always stores _index (possibly None),
        # so an unassigned index raised TypeError from int(None).
        raw = getattr(self, '_index', None)
        return -1 if raw is None else int(raw)
    @index.setter
    def index(self, index):
        """System index setter; a truthy index loads the system info."""
        self._index = index
        # NOTE(review): a falsy index (0) skips the fetch — confirm the device
        # never uses 0 as a valid system index.
        if index:
            info = self.device.systems.info(index)
            # Bug fix: the old `map(lambda (k, v): ...)` used Python 2-only
            # tuple-unpacking lambdas (SyntaxError on Python 3), and Python 3's
            # lazy map() would have dropped the side effects even if it parsed.
            for key, val in info.items():
                setattr(self, key, val)
    def info(self):
        """ Device.Systems.info pass-through """
        if self.index <= 0:
            raise CommandUnavailableException
        return self.device.systems.info(self.index)
    def remove(self):
        """ Device.Systems.remove pass-through """
        if self.index <= 0:
            raise CommandUnavailableException
        return self.device.systems.remove(self.index)
    def group_quick_lockout(self, value):
        """ Device.Systems.group_quick_lockout pass-through """
        if self.index <= 0:
            raise CommandUnavailableException
        # NOTE(review): `value` is accepted but never forwarded to the device
        # call — confirm whether it should be passed along.
        return self.device.systems.group_quick_lockout(self.index)
# Expose the module-level TOGGLE constants as System.TOGGLE for convenience.
System.TOGGLE = TOGGLE
class Systems(list):
""" Device Systems """
    def __init__(self, device):
        """ Initialize Systems """
        # NOTE(review): the list base class is never initialized or populated;
        # the device itself is the backing store (see __len__/__getitem__).
        self.device = device
    def __len__(self):
        """
        Get System Count
        Returns the number of stored Systems.
        Controller -> Radio
        1: SCT
        Radio -> Controller
        1:SCT,###
        ### : Number of Systems (0-200)
        NOTE: This command is only acceptable in Programming Mode.
        """
        # Queries the device each time; not cached.
        return int(self.device.command('SCT'))
    def __iter__(self):
        # Iterate stored systems by device index.
        # NOTE(review): range(head, tail) excludes `tail`, yet SIT is documented
        # as the *last* stored index — possible off-by-one. Also, systems carry
        # rev/fwd indices (a linked list), so indices may not be contiguous —
        # confirm against actual device behavior.
        for i in range(self.head, self.tail):
            yield self[i]
    def __getitem__(self, index):
        # Build a System handle bound to this device; assigning the index
        # triggers the System.index setter, which fetches the attributes.
        system = Device.System(self.device)
        system.index = index
        return system
    def __str__(self):
        # String form is the index range; shares __iter__'s possible
        # off-by-one (range excludes the documented "last index" from SIT).
        return str(range(self.head, self.tail))
    @property
    def head(self):
        """
        Get System Index Head
        Returns the first index of stored system list.
        Controller -> Radio
        1: SIH
        Radio -> Controller
        1:SIH,[SYS_INDEX]
        [SYS_INDEX] : System Index
        NOTE: This command is only acceptable in Programming Mode.
        """
        # Queried from the device each time, converted to int.
        return int(self.device.command('SIH'))
    @property
    def tail(self):
        """
        Get System Index Tail
        Returns the last index of stored system list.
        Controller -> Radio
        1: SIT
        Radio -> Controller
        1:SIT,[SYS_INDEX]
        [SYS_INDEX] : System Index
        NOTE: This command is only acceptable in Programming Mode.
        """
        # Queried from the device each time, converted to int.
        return int(self.device.command('SIT'))
    def append(self, system):
        """
        Create System
        Creates a system and returns created system index.
        Controller -> Radio
        1: CSY,[SYS_TYPE]
        [SYS_TYPE] : System Type (See Device.System.SYS_TYPE class)
        Radio -> Controller
        1: CSY,[SYS_INDEX]
        [SYS_INDEX] : The Index if Created System
        NOTE: The index is a handle to get/set system information.
        Returns -1 if the scanner failed to create because of no resource.
        This command is only acceptable in Programming Mode.
        """
        # NOTE(review): overrides list.append without calling it; the only
        # effect is recording the new device index on `system` (whose index
        # setter then re-fetches the system info). Returns None, not the
        # index, despite the docstring — confirm intended.
        system.index = self.device.command('CSY', system.sys_type)
    def remove(self, index):
        """
        Delete System
        Deletes a system.
        Controller -> Radio
        1: DSY,[SYS_INDEX]
        [SYS_INDEX] : System Index
        Radio -> Controller
        1: DSY,OK
        NOTE: This command is only acceptable in Programming Mode.
        """
        # Takes a device index (not a list element, unlike list.remove).
        return self.device.command('DSY', index)
def info(self, index, name=None, quick_key=None, hld=None, lout=None, att=None, dly=None, skp=None, emg=None):
    """
    Get/Set System Info
    Get/Set System Information.
    Controller -> Radio
    1: SIN,[INDEX]
    [INDEX] : System Index
    2: SIN,[SYS_TYPE],[NAME],[QUICK_KEY],[HLD],[LOUT],[ATT],[DLY],[SKP],[EMG]
    [SYS_TYPE] : System Type (See Device.System.SYS_TYPE class)
    [NAME] : Name (max 16char)
    [QUICK_KEY] : Quick Key (See QUICK_KEY class)
    [HLD] : System Hold Time (0-255)
    [LOUT] : Lockout (See LOUT class)
    [ATT] : Attenuation (See TOGGLE class)
    [DLY] : Delay Time (0-5)
    [SKP] : Data Skip (See TOGGLE class)
    [EMG] : Emergency Alert (See EMG class)
    Radio -> Controller
    1: SIN,[SYS_TYPE],[NAME],[QUICK_KEY],[HLD],[LOUT],[ATT],[DLY],[SKP],[EMG],[REV_INDEX],[FWD_INDEX],[CHN_GRP_HEAD],[CHN_GRP_TAIL],[SEQ_NO]
    [SYS_TYPE] : System Type (See Device.System.SYS_TYPE class)
    [NAME] : Name (max 16char)
    [QUICK_KEY] : Quick Key (See QUICK_KEY class)
    [HLD] : System Hold Time (0-255)
    [LOUT] : Lockout (See LOUT class)
    [ATT] : Attenuation (See TOGGLE class)
    [DLY] : Delay Time (0-5)
    [SKP] : Data Skip (See TOGGLE class)
    [EMG] : Emergency Alert (See EMG class)
    [REV_INDEX] : Reverse System Index
    [FWD_INDEX] : Forward System Index
    [CHN_GRP_HEAD] : Channel Group Index Head
    [CHN_GRP_TAIL] : Channel Group Index Tail
    [SEQ_NO] : System Sequence Number (1-200)
    2: SIN,OK
    NOTE: The scanner does not return a value for parameters which are not appropriate for the system type.
    The scanner does not set a value for parameters which are not appropriate for the system type.
    Only provided parameters are changed.
    The command is aborted if any format error is detected.
    This command is only acceptable in Programming Mode.

    NOTE(review): the setter-style parameters (name, quick_key, hld, lout,
    att, dly, skp, emg) are accepted but never passed to the device below --
    only a read (form 1) is ever issued.  The "set" half of this method
    appears unfinished; confirm against the SIN write form before relying
    on it.
    """
    # Reads system info; `keys` names the fields of the parsed response.
    return self.device.command('SIN', index, keys=('sys_type', 'name', 'quick_key', 'hld', 'lout', 'att', 'dly', 'skp', 'emg', 'rev_index', 'fwd_index', 'chn_grp_head', 'chn_grp_tail', 'seq_no'))
@property
def system_quick_lockout(self):
"""
Get System Quick Lockout
Returns the System Quick Key status.
Controller -> Radio
1: QSL
Radio -> Controller
1: QSL,##########
########## : System Quick Key status (See TOGGLE class)
NOTE: This command is only acceptable in Programming Mode.
| |
Security)
self.security._validate()
@property
def log_level(self):
    """The application master's log level (a ``LogLevel`` member)."""
    return self._log_level

@log_level.setter
def log_level(self, log_level):
    # Normalize through the LogLevel enum (defined elsewhere in this
    # module) so that invalid values are rejected at assignment time.
    self._log_level = LogLevel(log_level)
def __repr__(self):
    # Deliberately terse placeholder repr; nested fields are elided.
    return 'Master<...>'
@classmethod
@implements(Specification.from_dict)
def from_dict(cls, obj, **kwargs):
    """Build a Master from a plain dict, converting nested specs first."""
    cls._check_keys(obj)
    obj = obj.copy()

    raw_log = obj.pop('log_config', None)
    log_config = None if raw_log is None else File.from_dict(raw_log, **kwargs)

    raw_sec = obj.pop('security', None)
    security = None if raw_sec is None else Security.from_dict(raw_sec, **kwargs)

    raw_res = obj.pop('resources', None)
    resources = None if raw_res is None else Resources.from_dict(raw_res)

    raw_files = obj.pop('files', None)
    files = (None if raw_files is None
             else {name: File.from_dict(spec, **kwargs)
                   for name, spec in raw_files.items()})

    # Remaining keys are passed through as plain keyword arguments.
    return cls(log_config=log_config,
               security=security,
               resources=resources,
               files=files,
               **obj)
@classmethod
@implements(Specification.from_protobuf)
def from_protobuf(cls, obj):
    """Build a Master from its protobuf representation."""
    # Optional sub-messages: only convert when actually present.
    log_config = (File.from_protobuf(obj.log_config)
                  if obj.HasField('log_config') else None)
    security = (Security.from_protobuf(obj.security)
                if obj.HasField('security') else None)
    return cls(resources=Resources.from_protobuf(obj.resources),
               files={name: File.from_protobuf(f)
                      for name, f in obj.files.items()},
               script=obj.script,
               env=dict(obj.env),
               log_level=_proto.Log.Level.Name(obj.log_level),
               log_config=log_config,
               security=security)
class DelegationTokenProvider(Specification):
    """Configuration for the Delegation Token Provider.

    Parameters
    ----------
    name : str
        The name of the external system for which to obtain a delegation
        token (e.g. ``'hive'``).
    config : dict
        Configuration used to connect to that external system when fetching
        the delegation token.
    """
    __slots__ = ('name', 'config')
    _params = ('name', 'config')
    _protobuf_cls = _proto.DelegationTokenProviderSpec

    def __init__(self, name='', config=None):
        self.name = name
        # `config` may arrive as a protobuf ScalarMapContainer in some call
        # paths; normalize it to a plain dict either way.
        self.config = dict(config) if config is not None else {}
        self._validate()

    def _validate(self):
        self._check_is_type('name', str)
        self._check_is_dict_of('config', str, str)

    def __repr__(self):
        return 'DelegationTokenProvider<...>'

    @classmethod
    @implements(Specification.from_dict)
    def from_dict(cls, obj, **kwargs):
        cls._check_keys(obj)
        obj = obj.copy()
        return cls(name=obj.pop('name', None),
                   config=obj.pop('config', None),
                   **obj)

    @classmethod
    @implements(Specification.from_protobuf)
    def from_protobuf(cls, obj):
        return cls(name=obj.name, config=obj.config)
class ApplicationSpec(Specification):
    """A complete description of an application.

    Parameters
    ----------
    services : dict, optional
        A mapping of service-name to services. Applications must either specify
        at least one service, or a script for the application master to run
        (see ``skein.Master`` for more information).
    master : Master, optional
        Additional configuration for the application master service. See
        ``skein.Master`` for more information.
    name : str, optional
        The name of the application, defaults to 'skein'.
    queue : str, optional
        The queue to submit to. Defaults to the default queue.
    user : str, optional
        The user name to submit the application as. Requires that the
        submitting user have permission to proxy as this user name. Default is
        the submitter's user name.
    node_label : str, optional
        The node label expression to use when requesting containers for this
        application. Services can override this setting by specifying
        ``node_label`` on the service directly. Default is no label.
    tags : set, optional
        A set of strings to use as tags for this application.
    file_systems : list, optional
        A list of Hadoop file systems to acquire delegation tokens for.
        A token is always acquired for the ``defaultFS``.
    delegation_token_providers : list, optional
        A list of mappings.
        Each mapping is for configuring a connection with an external system
        and get a delegation token.
    acls : ACLs, optional
        Allows restricting users/groups to subsets of application access. See
        ``skein.ACLs`` for more information.
    max_attempts : int, optional
        The maximum number of submission attempts before marking the
        application as failed. Note that this only considers failures of the
        application master during startup. Default is 1.
    """
    __slots__ = ('services', 'master', 'name', 'queue', 'user', 'node_label',
                 'tags', 'file_systems', 'delegation_token_providers', 'acls',
                 'max_attempts')
    _protobuf_cls = _proto.ApplicationSpec

    def __init__(self, services=None, master=None, name='skein',
                 queue='default', user='', node_label='', tags=None,
                 file_systems=None, delegation_token_providers=None, acls=None,
                 max_attempts=1):
        self.services = {} if services is None else services
        self.master = Master() if master is None else master
        self.name = name
        self.queue = queue
        self.user = user
        self.node_label = node_label
        self.tags = set() if tags is None else set(tags)
        self.file_systems = [] if file_systems is None else file_systems
        self.delegation_token_providers = \
            [] if delegation_token_providers is None else delegation_token_providers
        self.acls = ACLs() if acls is None else acls
        self.max_attempts = max_attempts
        self._validate()

    def __repr__(self):
        return ('ApplicationSpec<name=%r, queue=%r, services=...>' %
                (self.name, self.queue))

    def _validate(self):
        """Check field types and the service dependency graph; raises on error."""
        self._check_is_type('name', str)
        self._check_is_type('queue', str)
        self._check_is_type('user', str)
        self._check_is_type('node_label', str)
        self._check_is_set_of('tags', str)
        self._check_is_list_of('file_systems', str)
        # Deliberately relaxed: entries may be dicts or DelegationTokenProvider.
        #self._check_is_list_of('delegation_token_providers', DelegationTokenProvider)
        self._check_is_type('delegation_token_providers', list)
        self._check_is_bounded_int('max_attempts', min=1)
        self._check_is_type('acls', ACLs)
        self.acls._validate()
        self._check_is_type('master', Master)
        self.master._validate()
        self._check_is_dict_of('services', str, Service)
        if not self.services and not self.master.script:
            raise context.ValueError("There must be at least one service")
        for name, service in self.services.items():
            service._validate()
            missing = set(service.depends).difference(self.services)
            if missing:
                raise context.ValueError(
                    "Unknown service dependencies for service %r:\n"
                    "%s" % (name, format_list(missing)))
        # Dependencies must form a DAG.
        dependencies = {name: service.depends
                        for name, service in self.services.items()}
        check_no_cycles(dependencies)

    @classmethod
    def _from_any(cls, spec):
        """Generic creation method for all types accepted as ``spec``"""
        if isinstance(spec, str):
            spec = cls.from_file(spec)
        elif isinstance(spec, dict):
            spec = cls.from_dict(spec)
        elif not isinstance(spec, cls):
            raise context.TypeError("spec must be either an ApplicationSpec, "
                                    "path, or dict, got "
                                    "%s" % type(spec).__name__)
        return spec

    @classmethod
    @implements(Specification.from_dict)
    def from_dict(cls, obj, **kwargs):
        _origin = _pop_origin(kwargs)
        cls._check_keys(obj)
        obj = obj.copy()
        services = obj.pop('services', None)
        if services is not None and isinstance(services, dict):
            services = {k: Service.from_dict(v, _origin=_origin)
                        for k, v in services.items()}
        acls = obj.pop('acls', None)
        if acls is not None and isinstance(acls, dict):
            acls = ACLs.from_dict(acls)
        master = obj.pop('master', None)
        if master is not None and isinstance(master, dict):
            master = Master.from_dict(master, _origin=_origin)
        return cls(services=services, acls=acls, master=master, **obj)

    @classmethod
    @implements(Specification.from_protobuf)
    def from_protobuf(cls, obj):
        services = {k: Service.from_protobuf(v)
                    for k, v in obj.services.items()}
        delegation_token_providers = [DelegationTokenProvider.from_protobuf(p)
                                      for p in obj.delegation_token_providers]
        return cls(name=obj.name,
                   queue=obj.queue,
                   user=obj.user,
                   node_label=obj.node_label,
                   tags=set(obj.tags),
                   file_systems=list(obj.file_systems),
                   delegation_token_providers=delegation_token_providers,
                   # BUGFIX: was min(1, obj.max_attempts), which capped every
                   # value at 1 and produced 0 (failing the min=1 validation)
                   # when the protobuf field was unset.  Clamp UP to 1 instead.
                   max_attempts=max(1, obj.max_attempts),
                   acls=ACLs.from_protobuf(obj.acls),
                   master=Master.from_protobuf(obj.master),
                   services=services)

    @classmethod
    def from_file(cls, path, format='infer'):
        """Create an instance from a json or yaml file.

        Parameters
        ----------
        path : str
            The path to the file to load.
        format : {'infer', 'json', 'yaml'}, optional
            The file format. By default the format is inferred from the file
            extension.
        """
        format = _infer_format(path, format=format)
        origin = os.path.abspath(os.path.dirname(path))
        with open(path) as f:
            data = f.read()
        if format == 'json':
            obj = json.loads(data)
        else:
            obj = yaml.safe_load(data)
        return cls.from_dict(obj, _origin=origin)

    def to_file(self, path, format='infer', skip_nulls=True):
        """Write object to a file.

        Parameters
        ----------
        path : str
            The path to the file to load.
        format : {'infer', 'json', 'yaml'}, optional
            The file format. By default the format is inferred from the file
            extension.
        skip_nulls : bool, optional
            By default null values are skipped in the output. Set to True to
            output all fields.
        """
        format = _infer_format(path, format=format)
        data = getattr(self, 'to_' + format)(skip_nulls=skip_nulls)
        with open(path, mode='w') as f:
            f.write(data)
class ResourceUsageReport(ProtobufMessage):
    """Resource usage report.

    Parameters
    ----------
    memory_seconds : int
        The total amount of memory (in MBs) the application has allocated times
        the number of seconds the application has been running.
    vcore_seconds : int
        The total number of vcores that the application has allocated times the
        number of seconds the application has been running.
    num_used_containers : int
        Current number of containers in use.
    needed_resources : Resources
        The needed resources.
    reserved_resources : Resources
        The reserved resources.
    used_resources : Resources
        The used resources.
    """
    __slots__ = ('memory_seconds', 'vcore_seconds', 'num_used_containers',
                 'needed_resources', 'reserved_resources', 'used_resources')
    _protobuf_cls = _proto.ResourceUsageReport

    # Scalar vs. nested-message fields, used by _validate/from_protobuf.
    _scalar_fields = ('memory_seconds', 'vcore_seconds', 'num_used_containers')
    _resource_fields = ('needed_resources', 'reserved_resources',
                        'used_resources')

    def __init__(self, memory_seconds, vcore_seconds, num_used_containers,
                 needed_resources, reserved_resources, used_resources):
        self.memory_seconds = memory_seconds
        self.vcore_seconds = vcore_seconds
        self.num_used_containers = num_used_containers
        self.needed_resources = needed_resources
        self.reserved_resources = reserved_resources
        self.used_resources = used_resources
        self._validate()

    def __repr__(self):
        return 'ResourceUsageReport<...>'

    def _validate(self):
        for field in self._scalar_fields:
            self._check_is_bounded_int(field)
        for field in self._resource_fields:
            self._check_is_type(field, Resources)
            getattr(self, field)._validate()

    @classmethod
    @implements(Specification.from_protobuf)
    def from_protobuf(cls, obj):
        kwargs = {field: getattr(obj, field) for field in cls._scalar_fields}
        kwargs.update({field: Resources.from_protobuf(getattr(obj, field))
                       for field in cls._resource_fields})
        return cls(**kwargs)
class ApplicationReport(ProtobufMessage):
"""Report of application status.
Parameters
----------
id : str
The application ID.
name : str
The application name.
user : str
The user that started the application.
queue : str
The application queue.
tags : set of strings
The application tags.
host : str
The host the application master is running on.
port : int
The rpc port for the application master
tracking_url : str
The application tracking url.
state : ApplicationState
The application state.
final_status : FinalStatus
The application final status.
progress : float
The progress of the application, from 0.0 to 1.0.
usage : ResourceUsageReport
Report on application resource usage.
diagnostics : str
| |
"""
A simple and basic Python 3 https://aoe2.net/ API wrapper for sending `GET requests`.
Available on GitHub (+ documentation): https://github.com/sixP-NaraKa/aoe2net-api-wrapper
Additional data manipulation/extraction from the provided data by this API wrapper has to be done by you, the user.
See https://aoe2.net/#api & https://aoe2.net/#nightbot.
"""
import requests
import json as jsn
# api base urls
API_BASE_URL = "https://aoe2.net/api"
NIGHTBOT_BASE_URL = API_BASE_URL + "/nightbot"  # "https://aoe2.net/api/nightbot"
# request api base urls (api endpoints) -- these return JSON
STRINGS_URL = API_BASE_URL + "/strings"
LEADERBOARD_URL = API_BASE_URL + "/leaderboard"
LOBBIES_URL = API_BASE_URL + "/lobbies"
LAST_MATCH_URL = API_BASE_URL + "/player/lastmatch"
MATCH_HISTORY_URL = API_BASE_URL + "/player/matches"
RATING_HISTORY_URL = API_BASE_URL + "/player/ratinghistory"
MATCHES_URL = API_BASE_URL + "/matches"
MATCH_URL = API_BASE_URL + "/match"
NUMBERS_ONLINE_URL = API_BASE_URL + "/stats/players"
# request nightbot api base urls (api endpoints) -- these return plain text;
# the trailing "?" anticipates appended query strings
RANK_DETAILS_URL = NIGHTBOT_BASE_URL + "/rank?"
RECENT_OPPONENT_URL = NIGHTBOT_BASE_URL + "/opponent?"
CURRENT_MATCH_URL = NIGHTBOT_BASE_URL + "/match?"
CURRENT_CIVS_URL = NIGHTBOT_BASE_URL + "/civs?"
CURRENT_MAP_URL = NIGHTBOT_BASE_URL + "/map?"
# request headers shared by every GET request
headers = {'content-type': 'application/json;charset=UTF-8'}
# Base exception type for every error this wrapper raises deliberately.
class Aoe2NetException(Exception):
    """Raised for AoE2.net API errors (bad status code, invalid JSON)."""
""" ----------------------------------------------- HELPER FUNCTIONS -----------------------------------------------"""
def _is_valid_kwarg(provided: dict, available: dict):
"""
Helper function to check if a user provided dictionary has the correct arguments,
compared to a dictionary with the actual available arguments.
Updates, if no difference found, the dictionary 'available'.
Parameters
----------
provided : `dict`
The user defined dictionary of optional additional arguments.
available : `dict`
The available optional additional arguments possible.
:raises KeyError:
invalid additional keyword argument supplied
"""
diff = provided.keys() - available.keys()
if diff: # if there are differences
msg = "invalid optional keyword argument passed: {}. Available arguments: {}".format(diff, list(available.keys()))
raise KeyError(msg)
available.update(provided)
return available
def _get_request_response(url: str, params: dict = None, json: bool = True):
    """
    Helper function to request data.

    For the NIGHTBOT_API calls, the returned data is not JSON, but plain text.
    Each of those functions will return the response.text explicitly.

    Parameters
    ----------
    url : `str`
        The request to call the API with.
    params : `dict`
        A dictionary of parameters that will be used for a GET request.
    json : `bool`
        Specifies if the request response should be returned in JSON format. Defaults to True.

    :return:
        the request response (decoded JSON when ``json`` is True, otherwise
        the plain ``requests.Response`` object)
    :raises requests.exceptions.RequestException:
        if an exception happens during the request handling
    :raises Aoe2NetException:
        if the status code of the response is not 200, or the body cannot
        be decoded as JSON
    """
    try:
        response = requests.get(url, params=params, headers=headers)
    except requests.exceptions.RequestException:
        # Re-raise unchanged: the previous code wrapped the exception in a
        # fresh RequestException, which discarded the original subclass,
        # traceback and context.
        raise
    if response.status_code != 200:
        msg = "Expected status code 200 - got {}.".format(response.status_code)
        raise Aoe2NetException(msg)
    if json:
        try:
            response = response.json()
        except jsn.JSONDecodeError as jde:
            # Chain the decode error so debugging keeps the original cause.
            raise Aoe2NetException(jde) from jde
    return response
""" ------------------------------------------- API REQUESTS (class API) -------------------------------------------"""
class API:
"""
The 'API' class encompasses the https://aoe2.net/#api API functions,
which can return their requested data in JSON format.
"""
def get_strings(self, game: str = "aoe2de", json: bool = True):
    """
    Requests the list of strings used by the API.

    Parameters
    ----------
    game : `str`
        The game for which to extract the list of strings. Defaults to "aoe2de".
        Possible games:
        aoe2hd -> Age of Empires 2: HD Edition, aoe2de -> Age of Empires 2: Definitive Edition
    json : `bool`
        Whether '_get_request_response()' should decode the response as JSON.
        Defaults to True.

    :return:
        the data in json format (if set), otherwise the plain response object.
    """
    params = {"game": game}
    return _get_request_response(url=STRINGS_URL, params=params, json=json)
def get_leaderboard(self, leaderboard_id: int = 3, start: int = 1, count: int = 10, json: bool = True, **kwargs):
    """
    Requests the data of the given leaderboard, specified by 'leaderboard_id'.

    Parameters
    ----------
    leaderboard_id : `int`
        The leaderboard in which to extract data in. Defaults to ID 3 (1v1 RM).
        Possible IDs:
        0 -> Unranked, 1 -> 1v1 Deathmatch, 2 -> Team Deathmatch, 3 -> 1v1 Random Map, 4 -> Team Random Map
    start : `int`
        Start point for which to extract data at. Defaults to 1.
        Ignored if 'search', 'steam_id' or 'profile_id' are defined.
    count : `int`
        How many entries of the leaderboard to extract, if able to find with
        the given criteria. Defaults to 10. Max. 10000.
    json : `bool`
        Whether '_get_request_response()' should decode the response as JSON.
        Defaults to True.
    **kwargs : `dict`
        Additional optional arguments: 'search' (player name; all matches
        returned), 'steam_id' (steamID64, takes precedence over both
        'search' and 'profile_id'), 'profile_id' (takes precedence over
        'search').

    :return:
        the data in json format (if set), otherwise the plain response object.
    :raises Aoe2NetException:
        'count' has to be 10000 or less.
    """
    if count > 10000:
        raise Aoe2NetException("'count' has to be 10000 or less.")
    # Validate the optional kwargs against the supported set (empty defaults).
    optionals = _is_valid_kwarg(
        kwargs, dict.fromkeys(("search", "steam_id", "profile_id"), ""))
    params = {"game": "aoe2de",
              "leaderboard_id": leaderboard_id,
              "start": start,
              "count": count,
              **optionals}
    return _get_request_response(url=LEADERBOARD_URL, params=params, json=json)
def get_open_lobbies(self, game: str = "aoe2de", json: bool = True):
    """
    Requests all currently open lobbies.

    Parameters
    ----------
    game : `str`
        The game for which to extract the lobby data. Defaults to "aoe2de".
        Possible games:
        aoe2hd -> Age of Empires 2: HD Edition, aoe2de -> Age of Empires 2: Definitive Edition
    json : `bool`
        Whether '_get_request_response()' should decode the response as JSON.
        Defaults to True.

    :return:
        the data in json format (if set), otherwise the plain response object.
    """
    return _get_request_response(url=LOBBIES_URL, params={"game": game}, json=json)
def get_last_match(self, steam_id: str = "", profile_id: str = "", json: bool = True):
    """
    Requests the last match a player started playing.
    This will be the current match if they are still in game.

    Either 'steam_id' or 'profile_id' required.

    Parameters
    ----------
    steam_id : `str`
        The steamID64 of a player (ex: 76561199003184910).
        Takes precedence over 'profile_id'.
    profile_id : `str`
        The profile ID (ex: 459658). Defaults to an empty string.
    json : `bool`
        Whether '_get_request_response()' should decode the response as JSON.
        Defaults to True.

    :return:
        the data in json format (if set), otherwise the plain response object.
    :raises Aoe2NetException:
        Either 'steam_id' or 'profile_id' required.
    """
    if not (steam_id or profile_id):
        raise Aoe2NetException("Either 'steam_id' or 'profile_id' required.")
    return _get_request_response(url=LAST_MATCH_URL,
                                 params={"steam_id": steam_id,
                                         "profile_id": profile_id},
                                 json=json)
def get_match_history(self, start: int = 0, count: int = 5, steam_id: str = "", profile_id: str = "", json: bool = True):
    """
    Requests the match history for a player.

    Either 'steam_id' or 'profile_id' required.

    Parameters
    ----------
    start : `int`
        Start point for which to extract data at. Defaults to 0 (most recent match).
    count : `int`
        How many entries to extract, if able to find with the given criteria.
        Defaults to 5. Max. 1000.
    steam_id : `str`
        The steamID64 of a player (ex: 76561199003184910).
        Takes precedence over 'profile_id'.
    profile_id : `str`
        The profile ID (ex: 459658). Defaults to an empty string.
    json : `bool`
        Whether '_get_request_response()' should decode the response as JSON.
        Defaults to True.

    :return:
        the data in json format (if set), otherwise the plain response object.
    :raises Aoe2NetException:
        'count' has to be 1000 or less. || Either 'steam_id' or 'profile_id' required.
    """
    if count > 1000:
        raise Aoe2NetException("'count' has to be 1000 or less.")
    if not (steam_id or profile_id):
        raise Aoe2NetException("Either 'steam_id' or 'profile_id' required.")
    return _get_request_response(url=MATCH_HISTORY_URL,
                                 params={"start": start,
                                         "count": count,
                                         "steam_id": steam_id,
                                         "profile_id": profile_id},
                                 json=json)
def get_rating_history(self, leaderboard_id: int = 3, start: int = 0, count: int = 100, steam_id: str = "", profile_id: str = "", json: bool = True):
"""
Requests the rating history for a player.
Either 'steam_id' or 'profile_id' required.
Parameters
---------
leaderboard_id : `int`
The leaderboard in which to extract data in. Defaults to ID 3 (1v1 RM).
Possible IDs:
0 -> Unranked, 1 -> 1v1 Deathmatch, 2 -> Team Deathmatch, 3 -> 1v1 | |
Fields:
exportUri: Required. A Google Cloud Storage URI for the exported BAM file.
The currently authenticated user must have write access to the new file.
An error will be returned if the URI already contains data.
projectId: Required. The Google Developers Console project ID that owns
this export. The caller must have WRITE access to this project.
referenceNames: The reference names to export. If this is not specified,
all reference sequences, including unmapped reads, are exported. Use `*`
to export only unmapped reads.
"""
exportUri = _messages.StringField(1)
projectId = _messages.StringField(2)
referenceNames = _messages.StringField(3, repeated=True)
class ExportVariantSetRequest(_messages.Message):
  """The variant data export request.

  Enums:
    FormatValueValuesEnum: The format for the exported data.

  Fields:
    bigqueryDataset: Required. The BigQuery dataset to export data to. This
      dataset must already exist. Note that this is distinct from the Genomics
      concept of "dataset".
    bigqueryTable: Required. The BigQuery table to export data to. If the
      table doesn't exist, it will be created. If it already exists, it will
      be overwritten.
    callSetIds: If provided, only variant call information from the specified
      call sets will be exported. By default all variant calls are exported.
    format: The format for the exported data.
    projectId: Required. The Google Cloud project ID that owns the destination
      BigQuery dataset. The caller must have WRITE access to this project.
      This project will also own the resulting export job.
  """

  class FormatValueValuesEnum(_messages.Enum):
    """The format for the exported data.

    Values:
      FORMAT_UNSPECIFIED: <no description>
      FORMAT_BIGQUERY: Export the data to Google BigQuery.
    """
    FORMAT_UNSPECIFIED = 0
    FORMAT_BIGQUERY = 1

  # Generated message definition: the integer field numbers identify fields
  # on the wire and must stay stable.
  bigqueryDataset = _messages.StringField(1)
  bigqueryTable = _messages.StringField(2)
  callSetIds = _messages.StringField(3, repeated=True)
  format = _messages.EnumField('FormatValueValuesEnum', 4)
  projectId = _messages.StringField(5)
class ExternalId(_messages.Message):
  """An ExternalId object.

  Fields:
    id: The id used by the source of this data.
    sourceName: The name of the source of this data.
  """

  id = _messages.StringField(1)
  sourceName = _messages.StringField(2)
class GenomicsAnnotationsDeleteRequest(_messages.Message):
  """A GenomicsAnnotationsDeleteRequest object.

  Fields:
    annotationId: The ID of the annotation to be deleted.
  """

  # required=True: every request must carry this field.
  annotationId = _messages.StringField(1, required=True)
class GenomicsAnnotationsGetRequest(_messages.Message):
  """A GenomicsAnnotationsGetRequest object.

  Fields:
    annotationId: The ID of the annotation to be retrieved.
  """

  # required=True: every request must carry this field.
  annotationId = _messages.StringField(1, required=True)
class GenomicsAnnotationsUpdateRequest(_messages.Message):
  """A GenomicsAnnotationsUpdateRequest object.

  Fields:
    annotation: An Annotation resource to be passed as the request body.
    annotationId: The ID of the annotation to be updated.
    updateMask: An optional mask specifying which fields to update. Mutable
      fields are name, variant, transcript, and info. If unspecified, all
      mutable fields will be updated.
  """

  annotation = _messages.MessageField('Annotation', 1)
  annotationId = _messages.StringField(2, required=True)
  updateMask = _messages.StringField(3)
class GenomicsAnnotationsetsDeleteRequest(_messages.Message):
  """A GenomicsAnnotationsetsDeleteRequest object.

  Fields:
    annotationSetId: The ID of the annotation set to be deleted.
  """

  # required=True: every request must carry this field.
  annotationSetId = _messages.StringField(1, required=True)
class GenomicsAnnotationsetsGetRequest(_messages.Message):
  """A GenomicsAnnotationsetsGetRequest object.

  Fields:
    annotationSetId: The ID of the annotation set to be retrieved.
  """

  # required=True: every request must carry this field.
  annotationSetId = _messages.StringField(1, required=True)
class GenomicsAnnotationsetsUpdateRequest(_messages.Message):
  """A GenomicsAnnotationsetsUpdateRequest object.

  Fields:
    annotationSet: An AnnotationSet resource to be passed as the request body.
    annotationSetId: The ID of the annotation set to be updated.
    updateMask: An optional mask specifying which fields to update. Mutable
      fields are name, source_uri, and info. If unspecified, all mutable
      fields will be updated.
  """

  annotationSet = _messages.MessageField('AnnotationSet', 1)
  annotationSetId = _messages.StringField(2, required=True)
  updateMask = _messages.StringField(3)
class GenomicsCallsetsDeleteRequest(_messages.Message):
  """A GenomicsCallsetsDeleteRequest object.

  Fields:
    callSetId: The ID of the call set to be deleted.
  """

  # required=True: every request must carry this field.
  callSetId = _messages.StringField(1, required=True)
class GenomicsCallsetsGetRequest(_messages.Message):
  """A GenomicsCallsetsGetRequest object.

  Fields:
    callSetId: The ID of the call set.
  """

  # required=True: every request must carry this field.
  callSetId = _messages.StringField(1, required=True)
class GenomicsCallsetsPatchRequest(_messages.Message):
  """A GenomicsCallsetsPatchRequest object.

  Fields:
    callSet: A CallSet resource to be passed as the request body.
    callSetId: The ID of the call set to be updated.
    updateMask: An optional mask specifying which fields to update. At this
      time, the only mutable field is name. The only acceptable value is
      "name". If unspecified, all mutable fields will be updated.
  """

  callSet = _messages.MessageField('CallSet', 1)
  callSetId = _messages.StringField(2, required=True)
  updateMask = _messages.StringField(3)
class GenomicsDatasetsDeleteRequest(_messages.Message):
  """A GenomicsDatasetsDeleteRequest object.

  Fields:
    datasetId: The ID of the dataset to be deleted.
  """

  # required=True: every request must carry this field.
  datasetId = _messages.StringField(1, required=True)
class GenomicsDatasetsGetIamPolicyRequest(_messages.Message):
  """A GenomicsDatasetsGetIamPolicyRequest object.

  Fields:
    getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the
      request body.
    resource: REQUIRED: The resource for which policy is being specified.
      Format is `datasets/<dataset ID>`.
  """

  getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1)
  resource = _messages.StringField(2, required=True)
class GenomicsDatasetsGetRequest(_messages.Message):
  """A GenomicsDatasetsGetRequest object.

  Fields:
    datasetId: The ID of the dataset.
  """

  # required=True: every request must carry this field.
  datasetId = _messages.StringField(1, required=True)
class GenomicsDatasetsListRequest(_messages.Message):
  """A GenomicsDatasetsListRequest object.

  Fields:
    pageSize: The maximum number of results to return in a single page. If
      unspecified, defaults to 50. The maximum value is 1024.
    pageToken: The continuation token, which is used to page through large
      result sets. To get the next page of results, set this parameter to the
      value of `nextPageToken` from the previous response.
    projectId: Required. The project to list datasets for.
  """

  # variant=INT32 pins the protobuf integer encoding for this field.
  pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(2)
  projectId = _messages.StringField(3)
class GenomicsDatasetsPatchRequest(_messages.Message):
  """A GenomicsDatasetsPatchRequest object.

  Fields:
    dataset: A Dataset resource to be passed as the request body.
    datasetId: The ID of the dataset to be updated.
    updateMask: An optional mask specifying which fields to update. At this
      time, the only mutable field is name. The only acceptable value is
      "name". If unspecified, all mutable fields will be updated.
  """

  dataset = _messages.MessageField('Dataset', 1)
  datasetId = _messages.StringField(2, required=True)
  updateMask = _messages.StringField(3)
class GenomicsDatasetsSetIamPolicyRequest(_messages.Message):
  """A GenomicsDatasetsSetIamPolicyRequest object.

  Fields:
    resource: REQUIRED: The resource for which policy is being specified.
      Format is `datasets/<dataset ID>`.
    setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
      request body.
  """

  resource = _messages.StringField(1, required=True)
  setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class GenomicsDatasetsTestIamPermissionsRequest(_messages.Message):
  """A GenomicsDatasetsTestIamPermissionsRequest object.

  Fields:
    resource: REQUIRED: The resource for which policy is being specified.
      Format is `datasets/<dataset ID>`.
    testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
      passed as the request body.
  """

  resource = _messages.StringField(1, required=True)
  testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class GenomicsDatasetsUndeleteRequest(_messages.Message):
  """A GenomicsDatasetsUndeleteRequest object.

  Fields:
    datasetId: The ID of the dataset to be undeleted.
    undeleteDatasetRequest: An UndeleteDatasetRequest resource to be passed
      as the request body.
  """

  datasetId = _messages.StringField(1, required=True)
  undeleteDatasetRequest = _messages.MessageField('UndeleteDatasetRequest', 2)
class GenomicsOperationsCancelRequest(_messages.Message):
  """A GenomicsOperationsCancelRequest object.

  Fields:
    cancelOperationRequest: A CancelOperationRequest resource to be passed as
      the request body.
    name: The name of the operation resource to be cancelled.
  """

  # NOTE(review): generated protorpc message -- field numbers are wire-format
  # tags and must not be renumbered.
  cancelOperationRequest = _messages.MessageField('CancelOperationRequest', 1)
  name = _messages.StringField(2, required=True)
class GenomicsOperationsGetRequest(_messages.Message):
  """A GenomicsOperationsGetRequest object.

  Fields:
    name: The name of the operation resource.
  """

  # NOTE(review): generated protorpc message -- the field number is a
  # wire-format tag and must not be renumbered.
  name = _messages.StringField(1, required=True)
class GenomicsOperationsListRequest(_messages.Message):
  """A GenomicsOperationsListRequest object.

  Fields:
    filter: A string for filtering Operations. The following filter fields are
      supported: * projectId: Required. Corresponds to
      OperationMetadata.projectId. * createTime: The time this job was
      created, in seconds from the
      [epoch](http://en.wikipedia.org/wiki/Unix_time). Can use `>=` and/or
      `<=` operators. * status: Can be `RUNNING`, `SUCCESS`, `FAILURE`,
      or `CANCELED`. Only one status may be specified. Examples: *
      `projectId = my-project AND createTime >= 1432140000` * `projectId = my-
      project AND createTime >= 1432140000 AND createTime <= 1432150000 AND
      status = RUNNING`
    name: The name of the operation collection.
    pageSize: The maximum number of results to return. If unspecified,
      defaults to 256. The maximum value is 2048.
    pageToken: The standard list page token.
  """

  # NOTE(review): generated protorpc message -- field numbers are wire-format
  # tags and must not be renumbered.
  filter = _messages.StringField(1)
  name = _messages.StringField(2, required=True)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
class GenomicsReadgroupsetsCoveragebucketsListRequest(_messages.Message):
"""A GenomicsReadgroupsetsCoveragebucketsListRequest object.
Fields:
end: The end position of the range on the reference, 0-based exclusive. If
specified, `referenceName` must also be specified. If unset or 0,
defaults to the length of the reference.
pageSize: The maximum number of results to return in a single page. If
unspecified, defaults to 1024. The maximum value is 2048.
pageToken: The continuation token, which is used to page through large
result sets. To get the next page of results, set this parameter to the
value of `nextPageToken` from the previous response.
readGroupSetId: Required. The ID of the read group set over which coverage
is requested.
referenceName: The name of the reference to query, within the reference
set associated with this query. Optional.
start: The start position of the range on the reference, 0-based
inclusive. If specified, `referenceName` must also be specified.
Defaults to 0.
targetBucketWidth: The desired width of each reported coverage bucket in
base pairs. This will be rounded down to the nearest | |
import os
import sys
import json
import numpy as np
import pandas as pd
import dataloader.file_io.dir_lister as dl
import dataloader.file_io.get_path as gp
# All dataset identifiers this loader knows how to build file lists for.
SUPPORTED_DATASETS = ('cityscapes', 'cityscapes_video', 'cityscapes_sequence', 'cityscapes_extra', 'cityscapes_part',
                      'kitti', 'kitti_2012', 'kitti_2015', 'virtual_kitti', 'mapillary', 'mapillary_by_ID', 'gta5',
                      'synthia', 'bdd100k', 'voc2012', 'a2d2', 'lostandfound', 'camvid', 'make3d')

# If a directory path contains one of the following strings, it will be ignored.
# BUG FIX: this was previously `('segmentation_trainid')`, which is a plain
# string (missing trailing comma), so any code iterating it would see single
# characters instead of the intended name. It is now a one-element tuple.
FOLDERS_TO_IGNORE = ('segmentation_trainid',)
class FilelistCreator:
    """Helper for building a JSON index of all files inside a dataset.

    The index is accumulated in ``self.json_dict`` and can be written to
    disk with :meth:`dump_to_json`. Dataset-specific subclasses implement
    :meth:`create_json_from_list`.
    """

    def __init__(self, path):
        """Store the absolute dataset path and collect all its directories.

        :param path: absolute path to the dataset
        """
        abs_path = os.path.abspath(path)
        if not os.path.isdir(abs_path):
            sys.exit("Der angegebene Dateipfad existiert nicht")
        self.dataset_path = abs_path
        self.folders = dl.DirLister.get_directories(self.dataset_path)
        self.json_dict = {}

    def preprocess_directories_list(self, filter_names):
        """Drop every directory whose path contains one of the given strings.

        :param filter_names: names by which the folders should be filtered
        """
        self.folders = dl.DirLister.remove_dirs_by_name(self.folders, filter_names)

    def preprocess_file_list(self, filter_dict):
        """Keep only file entries that contain all of the specified filters.

        Sometimes (e.g. for segmentation masks) the data exists in several
        representations and only one should be kept. For every key of
        ``filter_dict`` that matches an entry of ``self.json_dict['names']``,
        the corresponding 'files' list is reduced to those entries that
        contain all strings in ``filter_dict[name]``.

        :param filter_dict: maps a name from ``json_dict['names']`` to the
            strings that must appear in the kept files
        """
        for name, required_strings in filter_dict.items():
            if name not in self.json_dict['names']:
                continue
            idx = self.json_dict['names'].index(name)
            self.json_dict['files'][idx] = dl.DirLister.include_dirs_by_name(
                self.json_dict['files'][idx], required_strings)

    def create_filelist(self, filters, ending, ignore=(), ambiguous_names_to_ignore=()):
        """Collect all matching folders and files below the dataset root.

        :param filters: names which have to appear in the directory path
        :param ending: file ending of valid files
        :param ignore: strings; files/folders containing any of them are skipped
        :param ambiguous_names_to_ignore: filter names that also occur in every
            path; matches inside these longer strings are not counted
        :return: (sorted folders, sorted files inside those folders)
        """
        self.preprocess_directories_list(FOLDERS_TO_IGNORE)
        matching_folders = dl.DirLister.include_dirs_by_name(
            self.folders, filters, ignore, ambiguous_names_to_ignore)
        matching_folders = sorted(matching_folders, key=str.lower)
        matching_files = []
        for directory in matching_folders:
            matching_files.extend(
                dl.DirLister.get_files_by_ending(directory, ending, ignore))
        matching_files = sorted(matching_files, key=str.lower)
        return matching_folders, matching_files

    def dump_to_json(self, filename, remove_root=True):
        """Write ``self.json_dict`` to a JSON file inside the dataset folder.

        :param filename: name of the json file
        :param remove_root: if True, strip the dataset root from every folder
            and file entry so paths are stored relative to the dataset path
            (note: this mutates ``self.json_dict`` in place)
        """
        target = os.path.join(self.dataset_path, filename)
        if remove_root:
            prefix_len = len(self.dataset_path) + 1
            for group in ('folders', 'files'):
                for sublist in self.json_dict[group]:
                    for j, entry in enumerate(sublist):
                        sublist[j] = entry[prefix_len:]
        with open(target, 'w') as fp:
            json.dump(self.json_dict, fp)

    def create_json_from_list(self, json_list, stereo_replace):
        """Fill ``self.json_dict`` in the format of basic_files.json.

        Must be implemented by each dataset-specific subclass.

        :param json_list: dataset-specific dictionary of the form
            {'names': [data categories, e.g. 'color', 'depth', ...],
             'types': [corresponding file types, e.g. '.png', '.txt', ...],
             'filters': [corresponding folder filters, e.g. 'camera', ...]}
        :param stereo_replace: dictionary mapping the substring of a left
            stereo image path to the substring of the right one
        """
        raise NotImplementedError
class KITTIFilelistCreator(FilelistCreator):
"""Class to create the KITTI file list"""
def __init__(self, *args, **kwargs):
    """Delegate construction to FilelistCreator.

    NOTE(review): this override adds nothing over the inherited constructor
    and could be removed without changing behavior.
    """
    super().__init__(*args, **kwargs)
def create_json_from_list(self, json_list, stereo_replace):
"""Creates a dictionary in the format of the basic_files.json.
Takes a dictionary with the dataset-specific names and file endings and fills the dictionary self.json_dict
with the entries from the dataset folder based on the information in the given dictionary.
:param json_list: dataset-spicific dictionary of the form
{'names: [list of all data categories that this dataset provides, e.g. 'color', 'depth', ...],
'types': [list of the corresponding file types, e.g. '.png', '.txt', ...],
'filters': [list of the corresponding filters to identify the folders for each name, e.g. 'camera', ...]}
:param stereo_replace: dicionary that defines the strings that have to be interchanged in order to get the
right stereo image from the left stereo image: {left_image_string: right_image_string}
"""
folders_list = []
file_list = []
position_list = []
numerical_list = []
main_files = []
for i, name, type, filter in zip(range(len(json_list['names'])), json_list['names'],
json_list['types'], json_list['filters']):
folders, files = self.create_filelist(filter, type)
folders_list.append(folders)
file_list.append(files)
# positions contains 4-tuples, where the single entries have the following meaning:
# 1. global position inside the dataset (sorted by frame number and sequence
# 2. number of preceding frames in the sequence
# 3. number of frames in the sequence after the current frame
# 4. local position inside the list of the elements (e.g. depth has 20000 elements but color has 40000
# then the first entry will contain the mapping from depth to color and the fourth entry will contain
# numbers from 0 to 20000
positions = []
lower_limit = [0]
upper_limit = []
old_frame_number = None
new_frame_number = None
# get the sequence limits (upper and lower)
for file in files:
old_frame_number = new_frame_number
new_frame_number = int(os.path.splitext(os.path.split(file)[1])[0])
if old_frame_number != new_frame_number - 1 and old_frame_number is not None:
upper_limit.append(files.index(file) - 1)
lower_limit.append(files.index(file))
upper_limit.append(len(files) - 1)
index = 0
# get the position entries and the file names of all image files, numerical values are handled
# differently
for j, file in zip(range(len(files)), files):
file = file.split(self.dataset_path + os.sep)[1]
file = os.path.join(*file.split(os.sep)[1:])
if index < len(lower_limit) - 1 and j == lower_limit[index + 1]:
index += 1
if i == 0:
positions.append((len(positions), j - lower_limit[index], upper_limit[index] - j, j))
main_files.append(file)
else:
if 'right' in name:
for key in stereo_replace.keys():
file = file.replace(stereo_replace[key], key)
positions.append((main_files.index(file), j - lower_limit[index], upper_limit[index] - j, j))
position_list.append(positions)
numerical_list.append(None)
print('name: ', name, 'num_items: ', len(files))
# camera intrinsic parameters
camera_intrinsics = []
camera_intrinsics_right = []
json_list['names'].extend(['camera_intrinsics', 'camera_intrinsics_right'])
json_list['types'].extend(['.txt', '.txt'])
json_list['filters'].extend([['Raw_data'], ['Raw_data']])
for file in main_files:
base = file.split(os.sep)[0]
# get the corresponding calibration files, every color frame has a calibration file
if 'test' in file:
param_file_name = os.path.split(file)[1].replace('png', 'txt')
calib_file = os.path.join(self.dataset_path, 'Raw_data', base, 'intrinsics', param_file_name)
left = open(calib_file).readlines()[0][6:].split()
right = open(calib_file).readlines()[0][6:].split()
else:
calib_file = os.path.join(self.dataset_path, 'Raw_data', base, 'calib_cam_to_cam.txt')
left = open(calib_file).readlines()[:20][-1][6:].split()
right = open(calib_file).readlines()[:28][-1][6:].split()
left_matrix = np.eye(4)
right_matrix = np.eye(4)
left_matrix[:3, :3] = np.array([float(l) for l in left]).reshape((3, 3))
right_matrix[:3, :3] = np.array([float(r) for r in right]).reshape((3, 3))
left_matrix = list(left_matrix)
right_matrix = list(right_matrix)
left_matrix = [list(l) for l in left_matrix]
right_matrix = [list(r) for r in right_matrix]
camera_intrinsics.append(left_matrix)
camera_intrinsics_right.append(right_matrix)
print('camera_intrinsics:', len(camera_intrinsics))
print('camera_intrinsics_right:', len(camera_intrinsics_right))
folders_list.extend([folders_list[0], folders_list[1]])
position_list.extend([position_list[0], position_list[0]])
file_list.extend([file_list[0].copy(), file_list[1].copy()])
numerical_list.extend([camera_intrinsics, camera_intrinsics_right])
# velocity and timestamps
json_list['names'].extend(['timestamp', 'velocity'])
json_list['types'].extend(['.txt', '.txt'])
json_list['filters'].extend([['Raw_data', 'oxts'], ['Raw_data', 'oxts']])
folders, files = self.create_filelist(['Raw_data', 'oxts'], '.txt')
folders_vel = dl.DirLister.include_dirs_by_name(folders, 'data')
folders_time = [os.path.split(f)[0] for f in folders_vel]
files.extend([os.path.join(f, 'timestamps.txt') for f in folders_time])
folders_list.extend([folders_time, folders_vel])
times = []
velocities = []
for file in files:
if 'timestamps' in file:
temp_time = np.array(pd.read_csv(file, header=None, delimiter=' ')[1].values)
time = [float(t.split(':')[0])*3600 + float(t.split(':')[1])*60 + float(t.split(':')[2])
for t in temp_time]
times.extend(time)
if '00000' in file:
temp_data = np.array(pd.read_csv(file, header=None, delimiter=' ').values)[0]
velocity = np.sqrt(temp_data[8]**2 + temp_data[9]**2 + temp_data[10]**2)
velocities.append(velocity)
file_list.extend([file_list[0][:len(times)], file_list[0][:len(velocities)]])
position_list.extend([position_list[0][:len(times)], position_list[0][:len(velocities)]])
numerical_list.extend([times, velocities])
print('timestamps:', len(times))
print('velocities', len(velocities))
# poses for odometry evaluation (odometry dataset needed if desired!)
json_list['names'].extend(['poses'])
json_list['types'].extend(['.txt'])
json_list['filters'].extend([['Raw_data']])
folders, files = self.create_filelist('poses', '.txt')
if files:
poses = []
drives = []
frame_numbers = []
# defined officially in the | |
# Public API of this module; each fit_* entry point has a pipe-enabled
# ft_* counterpart created via add_pipe() below.
__all__ = [
    "fit_gp",
    "ft_gp",
    "fit_lm",
    "ft_lm",
    "fit_rf",
    "ft_rf",
    "fit_kmeans",
    "ft_kmeans",
]
## Fitting via sklearn package
try:
from sklearn.base import clone
from sklearn.linear_model import LinearRegression
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Kernel, RBF, ConstantKernel as Con
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestRegressor
except ModuleNotFoundError:
raise ModuleNotFoundError("module sklearn not found")
import grama as gr
from copy import deepcopy
from grama import add_pipe, pipe
from pandas import concat, DataFrame, Series
from toolz import curry
from warnings import filterwarnings
## Helper functions and classes
# --------------------------------------------------
def standardize_cols(df, ser_min, ser_max, var):
    """Min-max scale the columns ``var`` of ``df`` using given bounds.

    Columns with (near-)zero range are left shifted but unscaled, to avoid
    division by zero.

    @pre set(ser_min.index) == set(ser_max.index)
    """
    scaled = df.copy()
    for col in var:
        span = ser_max[col] - ser_min[col]
        span = 1 if span < 1e-16 else span
        scaled[col] = (df[col] - ser_min[col]) / span
    return scaled
def restore_cols(df, ser_min, ser_max, var):
    """Invert :func:`standardize_cols` on the columns ``var`` of ``df``.

    Columns with (near-)zero range are shifted back without rescaling,
    mirroring the forward transform.

    @pre set(ser_min.index) == set(ser_max.index)
    """
    restored = df.copy()
    for col in var:
        span = ser_max[col] - ser_min[col]
        span = 1 if span < 1e-16 else span
        restored[col] = span * df[col] + ser_min[col]
    return restored
class FunctionGPR(gr.Function):
    """Grama function wrapping a fitted sklearn GaussianProcessRegressor.

    For each natural output ``y`` the function exposes two outputs:
    ``y_mean`` (best-fit prediction) and ``y_sd`` (predictive std. dev.).
    Inputs are standardized with the stored ``var_min``/``var_max`` bounds
    before prediction.
    """

    def __init__(self, gpr, var, out, name, runtime, var_min, var_max):
        """
        Args:
            gpr (GaussianProcessRegressor): Fitted regressor
            var (list(str)): Input variable names
            out (list(str)): Natural output names being modeled
            name (str): Display name of the function
            runtime (float): Estimated runtime
            var_min (Series): Per-input minimum used for standardization
            var_max (Series): Per-input maximum used for standardization
        """
        self.gpr = gpr
        self.var = var
        ## "Natural" outputs; what we're modeling
        self.out_nat = out
        ## Predicted outputs; mean and std
        self.out_mean = [s + "_mean" for s in out]
        self.out_sd = [s + "_sd" for s in out]
        self.out = self.out_mean + self.out_sd
        self.name = name
        self.runtime = runtime
        self.var_min = var_min
        self.var_max = var_max

    def eval(self, df):
        """Evaluate the GP at the rows of ``df``.

        Raises:
            ValueError: If ``df`` is missing any of the model inputs.
        """
        ## Check invariant; model inputs must be subset of df columns
        if not set(self.var).issubset(set(df.columns)):
            raise ValueError(
                "Model function `{}` var not a subset of given columns".format(
                    self.name
                )
            )

        df_sd = standardize_cols(df, self.var_min, self.var_max, self.var)
        y, y_sd = self.gpr.predict(df_sd[self.var], return_std=True)

        return concat(
            (
                DataFrame(data=y, columns=self.out_mean),
                DataFrame(data=y_sd, columns=self.out_sd),
            ),
            axis=1,
        )

    def copy(self):
        """Return a shallow copy of this function.

        BUG FIX: the previous implementation referenced ``self.df_train``
        (never set; its assignment was commented out in __init__) and passed
        the wrong argument list to ``__init__``, dropping ``var_min`` and
        ``var_max``. It would raise AttributeError on any call.
        """
        return FunctionGPR(
            self.gpr,
            self.var,
            self.out_nat,
            self.name,
            self.runtime,
            self.var_min,
            self.var_max,
        )
class FunctionRegressor(gr.Function):
    """Grama function wrapping a fitted sklearn regressor.

    Exposes one output ``y_mean`` per natural output ``y``.
    """

    def __init__(self, regressor, var, out, name, runtime):
        """
        Args:
            regressor (scikit Regressor): Fitted regressor
            var (list(str)): Input variable names
            out (list(str)): Natural output names being modeled
            name (str): Display name of the function
            runtime (float): Estimated runtime
        """
        self.regressor = regressor
        self.var = var
        self.out = [o + "_mean" for o in out]
        self.name = name
        self.runtime = runtime

    def eval(self, df):
        """Evaluate the regressor at the rows of ``df``.

        Raises:
            ValueError: If ``df`` is missing any of the model inputs.
        """
        ## Check invariant; model inputs must be subset of df columns
        if not set(self.var).issubset(set(df.columns)):
            raise ValueError(
                "Model function `{}` var not a subset of given columns".format(
                    self.name
                )
            )

        ## Predict
        predictions = self.regressor.predict(df[self.var])
        return DataFrame(data=predictions, columns=self.out)
## Fit GP model with sklearn
# --------------------------------------------------
@curry
def fit_gp(
    df,
    md=None,
    var=None,
    out=None,
    domain=None,
    density=None,
    kernels=None,
    seed=None,
    suppress_warnings=True,
    n_restart=5,
    alpha=1e-10,
):
    r"""Fit a gaussian process

    Fit a gaussian process to given data. Specify var and out, or inherit from
    an existing model.

    Note that the new model will have two outputs `y_mean, y_sd` for each
    original output `y`. The quantity `y_mean` is the best-fit value, while
    `y_sd` is a measure of predictive uncertainty.

    Args:
        df (DataFrame): Data for function fitting
        md (gr.Model): Model from which to inherit metadata
        var (list(str) or None): List of features or None for all except outputs
        out (list(str)): List of outputs to fit
        domain (gr.Domain): Domain for new model
        density (gr.Density): Density for new model
        seed (int or None): Random seed for fitting process
        kernels (sklearn.gaussian_process.kernels.Kernel or dict or None): Kernel for GP
        n_restart (int): Restarts for optimization
        alpha (float or iterable): Value added to diagonal of kernel matrix
        suppress_warnings (bool): Suppress warnings when fitting?

    Returns:
        gr.Model: A grama model with fitted function(s)

    Notes:
        - Wrapper for sklearn.gaussian_process.GaussianProcessRegressor
    """
    if suppress_warnings:
        # NOTE(review): mutates the process-wide warning filters; this
        # affects code outside this call as well.
        filterwarnings("ignore")

    n_obs, n_in = df.shape  # NOTE(review): currently unused

    ## Infer fitting metadata, if available
    if not (md is None):
        domain = md.domain
        density = md.density
        out = md.out

    ## Check invariants
    if not set(out).issubset(set(df.columns)):
        raise ValueError("out must be subset of df.columns")

    ## Default input value
    if var is None:
        # NOTE(review): set difference means the feature order here is
        # nondeterministic across runs.
        var = list(set(df.columns).difference(set(out)))

    ## Check more invariants
    set_inter = set(out).intersection(set(var))
    if len(set_inter) > 0:
        raise ValueError(
            "out and var must be disjoint; intersect = {}".format(set_inter)
        )
    if not set(var).issubset(set(df.columns)):
        raise ValueError("var must be subset of df.columns")

    ## Pre-process kernel selection; normalize to a per-output dict
    if kernels is None:
        # Vectorize
        kernels = {o: None for o in out}
    elif isinstance(kernels, Kernel):
        kernels = {o: kernels for o in out}

    ## Pre-process data; min-max standardize the inputs (see standardize_cols)
    var_min = df[var].min()
    var_max = df[var].max()
    df_sd = standardize_cols(df, var_min, var_max, var)

    ## Construct gaussian process for each output
    functions = []

    for output in out:
        # Define and fit model
        gpr = GaussianProcessRegressor(
            kernel=deepcopy(kernels[output]),
            random_state=seed,
            normalize_y=True,
            copy_X_train=True,
            n_restarts_optimizer=n_restart,
            alpha=alpha,
        )
        gpr.fit(df_sd[var], df_sd[output])
        name = "GP ({})".format(str(gpr.kernel_))

        fun = FunctionGPR(gpr, var, [output], name, 0, var_min, var_max)
        functions.append(fun)

    ## Construct model
    return gr.Model(functions=functions, domain=domain, density=density)


## Pipe-enabled version of fit_gp
ft_gp = add_pipe(fit_gp)
## Fit random forest model with sklearn
# --------------------------------------------------
@curry
def fit_rf(
    df,
    md=None,
    var=None,
    out=None,
    domain=None,
    density=None,
    seed=None,
    suppress_warnings=True,
    **kwargs
):
    r"""Fit a random forest

    Fit a random forest to given data. Specify inputs and outputs, or inherit
    from an existing model.

    Args:
        df (DataFrame): Data for function fitting
        md (gr.Model): Model from which to inherit metadata
        var (list(str) or None): List of features or None for all except outputs
        out (list(str)): List of outputs to fit
        domain (gr.Domain): Domain for new model
        density (gr.Density): Density for new model
        seed (int or None): Random seed for fitting process
        suppress_warnings (bool): Suppress warnings when fitting?

    Keyword Arguments:
        n_estimators (int):
        criterion (int):
        max_depth (int or None):
        min_samples_split (int, float):
        min_samples_leaf (int, float):
        min_weight_fraction_leaf (float):
        max_features (int, float, string):
        max_leaf_nodes (int or None):
        min_impurity_decrease (float):
        min_impurity_split (float):
        bootstrap (bool):
        oob_score (bool):
        n_jobs (int or None):
        random_state (int):

    Returns:
        gr.Model: A grama model with fitted function(s)

    Notes:
        - Wrapper for sklearn.ensemble.RandomForestRegressor
    """
    if suppress_warnings:
        # NOTE(review): mutates the process-wide warning filters; this
        # affects code outside this call as well.
        filterwarnings("ignore")

    n_obs, n_in = df.shape  # NOTE(review): currently unused

    ## Infer fitting metadata, if available
    if not (md is None):
        domain = md.domain
        density = md.density
        out = md.out

    ## Check invariants
    if not set(out).issubset(set(df.columns)):
        raise ValueError("out must be subset of df.columns")

    ## Default input value
    if var is None:
        # NOTE(review): set difference means the feature order here is
        # nondeterministic across runs.
        var = list(set(df.columns).difference(set(out)))

    ## Check more invariants
    set_inter = set(out).intersection(set(var))
    if len(set_inter) > 0:
        raise ValueError(
            "outputs and inputs must be disjoint; intersect = {}".format(set_inter)
        )
    if not set(var).issubset(set(df.columns)):
        raise ValueError("var must be subset of df.columns")

    ## Construct a random forest for each output (one model per output column)
    functions = []

    for output in out:
        rf = RandomForestRegressor(random_state=seed, **kwargs)
        rf.fit(df[var], df[output])
        name = "RF"

        fun = FunctionRegressor(rf, var, [output], name, 0)
        functions.append(fun)

    ## Construct model
    return gr.Model(functions=functions, domain=domain, density=density)


## Pipe-enabled version of fit_rf
ft_rf = add_pipe(fit_rf)
## Fit linear model with sklearn
# --------------------------------------------------
@curry
def fit_lm(
    df,
    md=None,
    var=None,
    out=None,
    domain=None,
    density=None,
    seed=None,
    suppress_warnings=True,
    **kwargs
):
    r"""Fit a linear model

    Fit a linear model to given data. Specify inputs and outputs, or inherit
    from an existing model.

    Args:
        df (DataFrame): Data for function fitting
        md (gr.Model): Model from which to inherit metadata
        var (list(str) or None): List of features or None for all except outputs
        out (list(str)): List of outputs to fit
        domain (gr.Domain): Domain for new model
        density (gr.Density): Density for new model
        seed (int or None): Random seed for fitting process
        suppress_warnings (bool): Suppress warnings when fitting?

    Returns:
        gr.Model: A grama model with fitted function(s)

    Notes:
        - Wrapper for sklearn.linear_model.LinearRegression
    """
    if suppress_warnings:
        # NOTE(review): mutates the process-wide warning filters; this
        # affects code outside this call as well.
        filterwarnings("ignore")

    n_obs, n_in = df.shape  # NOTE(review): currently unused

    ## Infer fitting metadata, if available
    if not (md is None):
        domain = md.domain
        density = md.density
        out = md.out

    ## Check invariants
    if not set(out).issubset(set(df.columns)):
        raise ValueError("out must be subset of df.columns")

    ## Default input value
    if var is None:
        # NOTE(review): set difference means the feature order here is
        # nondeterministic across runs.
        var = list(set(df.columns).difference(set(out)))

    ## Check more invariants
    set_inter = set(out).intersection(set(var))
    if len(set_inter) > 0:
        raise ValueError(
            "outputs and inputs must be disjoint; intersect = {}".format(set_inter)
        )
    if not set(var).issubset(set(df.columns)):
        raise ValueError("var must be subset of df.columns")

    ## Construct a linear model for each output (one model per output column)
    functions = []

    for output in out:
        lm = LinearRegression(**kwargs)
        lm.fit(df[var], df[output])
        name = "LM"

        fun = FunctionRegressor(lm, var, [output], name, 0)
        functions.append(fun)

    ## Construct model
    return gr.Model(functions=functions, domain=domain, density=density)


## Pipe-enabled version of fit_lm
ft_lm = add_pipe(fit_lm)
## Fit kmeans clustering model
# --------------------------------------------------
@curry
def fit_kmeans(df, var=None, colname="cluster_id", seed=None, **kwargs):
r"""K-means cluster a dataset
Create a cluster-labeling model on a dataset using the K-means algorithm.
Args:
df (DataFrame): Hybrid point results from gr.eval_hybrid()
var (list or None): Variables in df on which to cluster. Use None to
cluster on all variables.
colname (string): | |
Oo0Ooo + i1IIi + OoooooooOO % o0oOOo0O0Ooo
if ( iIiiII != None ) :
oOo0OOOOOO , o0O0 = iIiiII . rloc_next_hop
o00ooO0Ooo = bold ( "nh {}({})" . format ( o0O0 , oOo0OOOOOO ) , False )
lprint ( " Install host-route via best {}" . format ( o00ooO0Ooo ) )
lisp_install_host_route ( I1iiIiiii1111 , None , False )
lisp_install_host_route ( I1iiIiiii1111 , o0O0 , True )
if 4 - 4: I1IiiI
if 74 - 74: oO0o / i11iIiiIii + Oo0Ooo
if 99 - 99: I1Ii111 . II111iiii * IiII . II111iiii + OoOoOO00
def add_to_rloc_probe_list(self, eid, group):
    """Register this RLOC for (eid, group) on the global probe list.

    Entries are keyed by "<rloc-address>[:<translated-port>]"; each value
    is a list of [rloc, eid, group] triples. If an entry for the same
    (eid, group) already exists it is either left alone (same RLOC object)
    or replaced. De-obfuscated rewrite; behavior preserved.
    """
    key = self.rloc.print_address_no_iid()
    port = self.translated_port
    if port != 0:
        key += ":" + str(port)

    if key not in lisp_rloc_probe_list:
        lisp_rloc_probe_list[key] = []

    # A null group is normalized to instance-id 0 before comparison.
    if group.is_null():
        group.instance_id = 0

    for entry_rloc, entry_eid, entry_group in lisp_rloc_probe_list[key]:
        if entry_eid.is_exact_match(eid) and entry_group.is_exact_match(group):
            if entry_rloc == self:
                # Same RLOC already registered; drop an empty bucket and bail.
                if lisp_rloc_probe_list[key] == []:
                    lisp_rloc_probe_list.pop(key)
                return
            # Stale entry for a different RLOC object; remove before re-adding.
            lisp_rloc_probe_list[key].remove([entry_rloc, entry_eid, entry_group])
            break

    lisp_rloc_probe_list[key].append([self, eid, group])

    # Inherit unreachability state from the first RLOC registered under key.
    head_rloc = lisp_rloc_probe_list[key][0][0]
    if head_rloc.state == LISP_RLOC_UNREACH_STATE:
        self.state = LISP_RLOC_UNREACH_STATE
        self.last_state_change = lisp_get_timestamp()
def delete_from_rloc_probe_list(self, eid, group):
    """Remove this RLOC's (eid, group) entry from the global probe list.

    Empty buckets are dropped from the dictionary. De-obfuscated rewrite;
    behavior preserved.
    """
    key = self.rloc.print_address_no_iid()
    port = self.translated_port
    if port != 0:
        key += ":" + str(port)
    if key not in lisp_rloc_probe_list:
        return

    # Find the first entry matching this RLOC object and (eid, group).
    target = []
    for entry in lisp_rloc_probe_list[key]:
        if entry[0] != self:
            continue
        if entry[1].is_exact_match(eid) == False:
            continue
        if entry[2].is_exact_match(group) == False:
            continue
        target = entry
        break
    if target == []:
        return

    try:
        lisp_rloc_probe_list[key].remove(target)
        if lisp_rloc_probe_list[key] == []:
            lisp_rloc_probe_list.pop(key)
    except:
        return
def print_rloc_probe_state(self, trailing_linefeed):
    """Return a human-readable probe-state summary for this RLOC chain.

    Walks the linked list of RLOCs (via next_rloc) and reports, for each,
    the last probe request/reply timestamps and round-trip time.
    De-obfuscated rewrite; all output strings preserved byte-for-byte.
    """
    output = ""
    rloc = self
    while True:
        last_request = rloc.last_rloc_probe
        if last_request == None:
            last_request = 0
        last_reply = rloc.last_rloc_probe_reply
        if last_reply == None:
            last_reply = 0
        rtt = rloc.print_rloc_probe_rtt()
        indent = space(4)

        if rloc.rloc_next_hop == None:
            output += "RLOC-Probing:\n"
        else:
            device, next_hop = rloc.rloc_next_hop
            output += "RLOC-Probing for nh {}({}):\n".format(next_hop, device)

        output += ("{}RLOC-probe request sent: {}\n{}RLOC-probe reply " + "received: {}, rtt {}").format(indent, lisp_print_elapsed(last_request),
            indent, lisp_print_elapsed(last_reply), rtt)

        if trailing_linefeed:
            output += "\n"

        rloc = rloc.next_rloc
        if rloc == None:
            break
        output += "\n"
    return (output)
def get_encap_keys(self):
    """Return (encrypt_key, icv_key) for this RLOC's encap crypto state.

    Falls back to port 4341 when no translated port is set. Returns
    (None, None) when no key is present or on any lookup error.
    De-obfuscated rewrite; behavior preserved.
    """
    port = "4341" if self.translated_port == 0 else str(self.translated_port)
    key_name = self.rloc.print_address_no_iid() + ":" + port

    try:
        crypto_keys = lisp_crypto_keys_by_rloc_encap[key_name]
        if crypto_keys[1]:
            return (crypto_keys[1].encrypt_key, crypto_keys[1].icv_key)
        return (None, None)
    except:
        return (None, None)
def rloc_recent_rekey(self):
    """Return True if this RLOC's encap key was rekeyed within the last second.

    A key with no recorded last_rekey counts as recently rekeyed; a missing
    key (or any lookup error) counts as not rekeyed. De-obfuscated rewrite;
    behavior preserved.
    """
    port = "4341" if self.translated_port == 0 else str(self.translated_port)
    key_name = self.rloc.print_address_no_iid() + ":" + port

    try:
        crypto_key = lisp_crypto_keys_by_rloc_encap[key_name][1]
        if crypto_key == None:
            return (False)
        if crypto_key.last_rekey == None:
            return (True)
        return (time.time() - crypto_key.last_rekey < 1)
    except:
        return (False)
class lisp_mapping ( ) :
def __init__ ( self , eid , group , rloc_set ) :
self . eid = eid
if ( eid == "" ) : self . eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . group = group
if ( group == "" ) | |
<gh_stars>0
"""annotator_distance.py - statistical significance of distance between genomic segments
=====================================================================================
Purpose
-------
The script :file:`annotator_distance.py` computes the statistical
significance of the association between segments on a genome.
The genome is arranged into ``workspaces``, contiguous blocks in the
genome that are labeled at their termini. An example of workspaces are
intergenic spaces labeled ``3'`` or ``5'`` according to the adjacent
gene.
Within the workspace are ``segments`` of interest, for example
transcription factor binding sites. The script counts the distance of
a segment to the nearest ``workspace`` terminus. The counts are
aggregated over all workspaces.
Next, the script randomly rearranges ``segments`` within a
``workspace`` in order to test the statistical significance of the
observed counts.
The script implements various sampling and counting methods.
Usage
-----
Type::
python annotator_distances.py --help
for command line usage.
Command line options
--------------------
"""
import os
import sys
import collections
import itertools
import CGAT.GTF as GTF
import CGAT.Bed as Bed
import CGAT.Intervals as Intervals
import CGAT.IOTools as IOTools
import CGAT.Experiment as E
import bx.intervals.intersection
import numpy
import random
import math
import array
import scipy
import scipy.stats
import matplotlib.pyplot as plt
# Global transform helpers, defined once for optimization purposes.
# (Converted from lambda assignments to named functions: PEP 8 E731,
# and so they can carry docstrings.)
def normalize_transform(x, y):
    """Return counts *x* normalized to frequencies.

    *y* is an additive correction to the denominator (e.g. a count of
    observations falling outside the tallied bins).
    """
    return numpy.array(x, float) / (sum(x) + y)


def cumulative_transform(x, y):
    """Return the cumulative frequency distribution of counts *x*.

    *y* is an additive correction to the denominator, as in
    :func:`normalize_transform`.
    """
    return numpy.cumsum(numpy.array(x, float) / (sum(x) + y))
def readWorkspace(infile,
                  workspace_builder="raw",
                  label="none",
                  map_id2annotation=None):
    """read workspace from infile.

    A workspace is a collection of intervals with two labels associated
    to each interval, one for the 5' and one for the 3' end.

    Available workspace builders are:

    gff
       take a gff file.
    gtf-intergenic
       build workspace from intergenic segments in a gtf file.
    gtf-intronic
       build workspace from intronic segments in a gtf file
    gtf-genic
       the workspace is built from genes (first to last exon).

    Available labels are:

    none
       no labels are given to the ends of workspaces
    direction
       labels are given based on the 5'/3' end of the
       bounding exon
    annotation
       labels are given based on a gene2annotation map.

    returns a list of segments for each contig in a dictionary
    """
    # Avoid a shared mutable default argument; behaviour is unchanged
    # for callers passing their own map.
    if map_id2annotation is None:
        map_id2annotation = {}

    # label_f maps the (5' info, 3' info) pair of a gap to its label tuple;
    # info_f extracts the relevant attribute from a GTF entry.
    if label == "none":
        label_f = lambda x, y: (("X",), ("X",))
        info_f = lambda x: None
    elif label == "direction":
        label_f = lambda x, y: ((("5", "3")[x],), (("3", "5")[y],))
        info_f = lambda x: x.strand == "+"
    elif label == "annotation":
        label_f = lambda x, y: (map_id2annotation[x], map_id2annotation[y])
        info_f = lambda x: x.gene_id
    else:
        # Previously an unknown label fell through silently and raised a
        # confusing NameError later; fail fast instead.
        raise ValueError("unknown label %s" % label)

    if workspace_builder == "gff":
        # Fixed NameError: the module is imported as GTF, not GFF.
        workspace = GTF.readAsIntervals(GTF.iterator(infile))

    elif workspace_builder == "gtf-intergenic":

        workspace = collections.defaultdict(list)
        # get all genes
        for e in GTF.merged_gene_iterator(GTF.iterator(infile)):
            workspace[e.contig].append((e.start, e.end, info_f(e)))

        # convert to intergenic regions.
        # overlapping genes are merged and the labels
        # of the right-most entry is retained
        for contig in workspace.keys():
            segs = workspace[contig]
            segs.sort()
            last = segs[0]
            new_segs = []
            for this in segs[1:]:
                if last[1] >= this[0]:
                    if this[1] > last[1]:
                        last = (last[0], this[1], this[2])
                    continue
                assert last[1] < this[0], "this=%s, last=%s" % (this, last)

                new_segs.append((last[1], this[0],
                                 label_f(last[2], this[2])))
                last = this
            workspace[contig] = new_segs

    elif workspace_builder == "gtf-intronic":

        workspace = collections.defaultdict(list)

        # the current procedure will count nested genes
        # twice
        for ee in GTF.flat_gene_iterator(GTF.iterator(infile)):

            exons = Intervals.combine([(e.start, e.end) for e in ee])
            introns = Intervals.complement(exons)

            r = ee[0]
            for start, end in introns:
                workspace[r.contig].append((start,
                                            end,
                                            label_f(info_f(r), info_f(r))
                                            ))
    elif workspace_builder == "gtf-genic":

        workspace = collections.defaultdict(list)

        # the current procedure will count nested genes
        # twice
        for ee in GTF.flat_gene_iterator(GTF.iterator(infile)):

            exons = Intervals.combine([(e.start, e.end) for e in ee])
            start, end = exons[0][0], exons[-1][1]
            r = ee[0]
            workspace[r.contig].append((start,
                                        end,
                                        label_f(info_f(r), info_f(r))
                                        ))
    else:
        raise ValueError("unknown workspace_builder %s" % workspace_builder)

    return workspace
def readSegments(infile, indexed_workspace,
                 truncate=False,
                 format="gtf",
                 keep_ambiguous=False,
                 remove_overhangs=False):
    """read segments from infile.

    segments not overlapping with indexed_workspace are removed.

    If :attr: truncate is given, segments extending beyond the workspace
    are truncated.

    returns a list of segments for each contig in a dictionary
    """
    counter = E.Counter()
    segments = collections.defaultdict(list)

    def addSegment(contig, start, end, counter):
        # Closure over `segments` / the keyword flags: classify one
        # candidate segment against the indexed workspace and record it.
        if contig in indexed_workspace:
            r = indexed_workspace[contig].find(start, end)
            if not r:
                # segment touches no workspace interval: drop it
                counter.nskipped += 1
                return
            if len(r) > 1:
                # overlaps several workspace intervals
                counter.nambiguous += 1
                if not keep_ambiguous:
                    return
            if truncate:
                # clip the segment to each overlapped workspace interval
                for x in r:
                    wstart, wend = x.start, x.end
                    rstart, rend = max(start, wstart), min(end, wend)
                    if start < wstart or end > wend:
                        counter.ntruncated += 1
                    segments[contig].append((rstart, rend))
                    counter.added += 1
            elif remove_overhangs:
                # keep the segment only if it lies fully inside every
                # overlapped workspace interval (for-else: append when
                # the loop finishes without break)
                for x in r:
                    wstart, wend = x.start, x.end
                    rstart, rend = max(start, wstart), min(end, wend)
                    if start < wstart or end > wend:
                        counter.overhangs += 1
                        break
                else:
                    segments[contig].append((start, end))
            else:
                segments[contig].append((start, end))
                counter.added += 1
            # NOTE(review): nkept is incremented once per candidate that
            # reached this point, even if the remove_overhangs branch
            # discarded it -- confirm this is the intended accounting.
            counter.nkept += 1

    if format == "gtf":
        gtf_iterator = GTF.flat_gene_iterator(GTF.iterator(infile))

        for gene in gtf_iterator:
            # get start and end ignoring introns
            # contig, start, end = gene[0].contig, min( [x.start for x in gene] ), max( [x.end for x in gene] )

            contig, coords = gene[0].contig, [(x.start, x.end) for x in gene]
            counter.ninput += 1
            for start, end in coords:
                addSegment(contig, start, end, counter)

    elif format == "bed":
        bed_iterator = Bed.iterator(infile)
        for bed in bed_iterator:
            counter.ninput += 1
            addSegment(bed.contig, bed.start, bed.end, counter)

    E.info("read segments: %s" % str(counter))
    return segments
class Sampler(object):

    """Base class for objects that produce a sample of randomly
    arranged segments within a workspace.

    Subclasses implement :meth:`sample`.
    """

    def __init__(self, observed, work_start, work_end):
        self.mObserved = observed
        self.mWorkStart = work_start
        self.mWorkEnd = work_end
        # Segment lengths, their total, and the free space left in the
        # workspace once all segments are accounted for.
        self.mLengths = [seg[1] - seg[0] for seg in observed]
        self.mTotalLength = sum(self.mLengths)
        self.mFreeLength = work_end - work_start - self.mTotalLength
        assert self.mFreeLength >= 0, "negative length: workspace=(%i,%i) %i-%i<0, segments=%s, lengths=%s" % \
            (work_start, work_end, work_end - work_start,
             self.mTotalLength, self.mObserved, self.mLengths)

    def sample(self):
        raise NotImplementedError("define sample() in base classes")
class SamplerPermutation(Sampler):

    """permute order of fragments and distribute randomly.

    The permutation works like this:
    1. Randomly permutate the order of segments

    2. Split the free space (:attr:mFreeSpace) within the workspace into n+1 randomly sized gaps

    3. Insert the gaps between permutated segments
    """

    def sample(self):
        """return simulated fragments."""
        simulated = []
        # 1. permutate order of segments
        random.shuffle(self.mLengths)
        # 2. determine size of space between samples
        # n+1 random cut points in the free space; sorting turns the
        # deltas between consecutive points into random gap sizes.
        points = []
        for x in range(len(self.mLengths) + 1):
            points.append(random.randint(0, self.mFreeLength))
        points.sort()
        # 3. move segments to appropriate place
        start = self.mWorkStart
        # NOTE(review): `simulated` is re-initialized here; the earlier
        # assignment above is redundant but harmless.
        simulated = []
        last = 0
        for x in range(len(self.mLengths)):
            # advance by the gap preceding this segment, then place it
            start += points[x] - last
            simulated.append((start, start + self.mLengths[x]))
            start += self.mLengths[x]
            last = points[x]
        # final position plus the trailing gap must stay inside the workspace
        assert start + (points[-1] - last) <= self.mWorkEnd, "start=%i, points[-1]=%i, work_end=%i" % \
            (start, points[-1] - last, self.mWorkEnd)
        return simulated
class SamplerBlocks(Sampler):

    """Move whole blocks of fragments, so that clustering of segments is
    taken into account.  Not implemented yet.
    """

    def sample(self):
        """return simulated fragments."""
        placeholder = []  # kept for symmetry with the sibling samplers
        raise NotImplementedError
class SamplerGaps(Sampler):

    """rearrange gaps within a block randomly.

    This sampler will preserve same of the clustering structure of segments."""

    def __init__(self, *args, **kwargs):
        Sampler.__init__(self, *args, **kwargs)
        # Pre-compute the lengths of the gaps between observed segments
        # (complement of the segments within the workspace bounds).
        self.mGapLengths = [x[1] - x[0]
                            for x in Intervals.complement(self.mObserved, self.mWorkStart, self.mWorkEnd)]

    def sample(self):
        """return simulated fragments."""
        simulated = []
        gaps = self.mGapLengths
        # Shuffle gap sizes only; segment order and lengths are kept, so
        # the relative clustering of segments is preserved.
        random.shuffle(gaps)
        start = self.mWorkStart
        for x in range(len(self.mLengths)):
            # skip a (shuffled) gap, then place the next segment
            start += gaps[x]
            simulated.append((start, start + self.mLengths[x]))
            start += self.mLengths[x]
        return simulated
class CountingResults(object):

    """a container for observed and simulated counts.
    """

    def __init__(self, labels):
        self.mLabels = labels       # labels for which statistics are kept
        self.mTransform = None      # optional transform applied to counts
        self.mEnvelopes = {}        # presumably confidence envelopes -- filled elsewhere
        self.mMedians = {}          # presumably medians of simulations -- filled elsewhere

        # observed/simulated counts must be attached before update() runs
        self.mObservedCounts = None
        self.mSimulatedCounts = None

        self.mStats = None          # per-label stats, built by update()
def updateFDR(self, obs_pvalues, sim_pvalues):
    """compute fdr stats with given counts.

    If obs_pvalues and sim_pvalues are given, computes the FDR (q-value)
    for the observed p-value.

    The q-value is the expected proportion of false positive observations
    at the observed p-value.

    qvalue = A / B

    A: average proportion of simulated data with P-Values < pvalue
       (expected false positive RATE)
    B: number of observed data with P-Values < pvalue
       (NUMBER of true positives)

    As there are several counters and labels, all observed and simulated
    pvalues are taken into account.

    The method needs to be called after :meth:update.
    """
    assert self.mStats is not None, "updateFDR called before calling update."

    for label in self.mLabels:
        pvalue = self.mStats[label].pvalue

        # Fixed NameError: was "sim_palues".
        a = scipy.stats.percentileofscore(sim_pvalues, pvalue) / 100.0
        b = scipy.stats.percentileofscore(
            obs_pvalues, pvalue) / 100.0 * len(obs_pvalues)

        # Fixed guard: the previous "b >= 0" admitted b == 0 and raised
        # ZeroDivisionError; q-value defaults to 0 with no discoveries.
        if b > 0:
            qvalue = min(1.0, a / b)
        else:
            qvalue = 0

        self.mStats[label] = self.mStats[label]._replace(qvalue=qvalue)
def update(self):
"""update stats from given counts.
"""
assert self.mObservedCounts is not None, "update called without observed counts."
assert self.mSimulatedCounts is not None, "update called without simulated counts."
self.mStats = {}
cls = collections.namedtuple(
"st", | |
self.var, False,
layer_3.h, 64, a, None)
layer_1_copy = layer_1.copy(state)
layer_2_copy = layer_2.copy(layer_1_copy.h)
layer_3_copy = layer_3.copy(layer_2_copy.h)
layer_q_copy = layer_q.copy(layer_3_copy.h)
layer_a_copy = layer_a.copy(layer_3_copy.h)
h_qvalue = layer_q.h
h_action = layer_a.h
h_qvalue_copy = layer_q_copy.h
h_action_copy = layer_a_copy.h
# Optimizer
loss_qvalue = tf.reduce_mean(tf.square(target_qvalue - h_qvalue))
loss_action = tf.reduce_mean(tf.square(target_action - h_action))
# Exponential decay
# global_step = tf.Variable(0, trainable=False)
# learning_rate = tf.train.exponential_decay(
# self.learning_rate, global_step, 1000000, 0.96, staircase=True)
# Optimization for trainning
# opt_q = tf.train.GradientDescentOptimizer(self.learning_rate)
# opt_a = tf.train.GradientDescentOptimizer(self.learning_rate)
opt_q = tf.train.AdamOptimizer(self.learning_rate)
opt_a = tf.train.AdamOptimizer(self.learning_rate)
grads_and_vars_q = opt_q.compute_gradients(loss_qvalue)
grads_and_vars_a = opt_a.compute_gradients(loss_action)
# Gradient clipping
capped_grads_and_vars_q = [
(tf.clip_by_norm(grad, 100.0), var) if grad is not None
else (None, var) for grad, var in grads_and_vars_q]
capped_grads_and_vars_a = [
(tf.clip_by_norm(grad, 100.0), var) if grad is not None
else (None, var) for grad, var in grads_and_vars_a]
# Gradient applying
self.train_q = opt_q.apply_gradients(capped_grads_and_vars_q)
self.train_a = opt_a.apply_gradients(capped_grads_and_vars_a)
# Evaultion
self.eval_q = h_qvalue
self.eval_a = h_action
self.eval_q_copy = h_qvalue_copy
self.eval_a_copy = h_action_copy
# Place holders
self.placeholder_state = state
self.placeholder_target_qvalue = target_qvalue
self.placeholder_target_action = target_action
self.placeholder_dropout_keep_prob = keep_prob
# Loss
self.loss_q = loss_qvalue
self.loss_a = loss_action
# Session
self.sess = tf.Session(graph=self.graph)
# Saver
self.saver = tf.train.Saver()
success = self.restore(ckpt_file)
if not success:
print '[ NeuralNet ]', 'Variables are randomly initialzed'
self.sess.run(tf.initialize_all_variables())
# Summary
self.merged = tf.merge_all_summaries()
self.writer = tf.train.SummaryWriter(log_dir, self.graph)
self.writer.flush()
self.initialized = True
def eval_qvalue(self, data, from_copy=False):
    """Evaluate the Q-value head on a batch of states (data[0]).

    When from_copy is True the frozen target-network copy is run (no
    dropout placeholder); otherwise the live network is run with the
    dropout keep probability pinned to 1.0.
    """
    if from_copy:
        fetches = self.eval_q_copy
        feed = {self.placeholder_state: data[0]}
    else:
        fetches = self.eval_q
        feed = {
            self.placeholder_state: data[0],
            self.placeholder_dropout_keep_prob: 1.0,
        }
    return self.sess.run(fetches, feed_dict=feed)
def eval_action(self, data, from_copy=False):
    """Evaluate the action (actor) head on a batch of states (data[0]).

    from_copy selects the frozen target-network copy; the live network
    is evaluated with dropout disabled (keep_prob = 1.0).
    """
    if from_copy:
        fetches = self.eval_a_copy
        feed = {self.placeholder_state: data[0]}
    else:
        fetches = self.eval_a
        feed = {
            self.placeholder_state: data[0],
            self.placeholder_dropout_keep_prob: 1.0,
        }
    return self.sess.run(fetches, feed_dict=feed)
def loss_qvalue(self, data):
    """Return the critic loss for states data[0] and targets data[1]."""
    feed = {
        self.placeholder_state: data[0],
        self.placeholder_target_qvalue: data[1],
        self.placeholder_dropout_keep_prob: 1.0,
    }
    return self.sess.run(self.loss_q, feed_dict=feed)
def loss_action(self, data):
    """Return the actor loss for states data[0] and target actions data[1]."""
    feed = {
        self.placeholder_state: data[0],
        self.placeholder_target_action: data[1],
        self.placeholder_dropout_keep_prob: 1.0,
    }
    return self.sess.run(self.loss_a, feed_dict=feed)
def train_qvalue(self, data):
    """Run one critic optimizer step on states data[0], targets data[1]."""
    feed = {
        self.placeholder_state: data[0],
        self.placeholder_target_qvalue: data[1],
        self.placeholder_dropout_keep_prob: self.dropout_keep_prob,
    }
    self.sess.run(self.train_q, feed_dict=feed)
def train_action(self, data):
    """Run one actor optimizer step on states data[0], target actions data[1]."""
    feed = {
        self.placeholder_state: data[0],
        self.placeholder_target_action: data[1],
        self.placeholder_dropout_keep_prob: self.dropout_keep_prob,
    }
    self.sess.run(self.train_a, feed_dict=feed)
def save_variables(self):
    """Persist the network variables through the variable manager."""
    self.var.save(self.sess)
class DeepRL_Multicore(deepRL.DeepRLBase):

    """Multi-slave deep RL driver.

    Seeds actor/critic replay buffers from warmup data (generated or
    loaded from file), pre-trains the networks, then interleaves
    environment stepping with replay-based training.
    """

    def __init__(self, env, nn, ac, warmup_file=None):
        # env: multi-slave environment master; nn: network wrapper;
        # ac: action helper.  warmup_file, when given, seeds the replay
        # buffers from disk instead of generating fresh warmup data.
        deepRL.DeepRLBase.__init__(self, warmup_file)
        self.exp_prob_default = 0.5
        self.exp_noise_default = 10.0
        self.qvalue_knoll_default = 0.7
        self.replay_buffer['actor'] = deepRL.ReplayBuffer(500000)
        self.replay_buffer['critic'] = deepRL.ReplayBuffer(500000)
        self.env = env
        self.nn = nn
        self.ac = ac
        self.train_iter = 10
        self.warmup_size = 0
        self.max_data_gen = 20000000
        self.sample_size = 128
        self.target_pos_pool = []
        warmup_data = []
        # Collect at least one target position per slave for later use.
        while len(self.target_pos_pool) < self.env.num_slave:
            self.env.reset()
            self.target_pos_pool += self.env.get_target_pos()
        if warmup_file is None:
            print '[DeepRL]', 'generating warmup data ...'
            warmup_data = self.convert_warmup_file_to_buffer_data(
                Env(dt, skel_file),
                gen_warmup_data([0.1] * 13, 30, 10, 0.01, False))
            print len(warmup_data), 'data were generated'
        else:
            print '[DeepRL]', 'loading warmup file ...'
            warmup_data = self.convert_warmup_file_to_buffer_data(
                Env(dt, skel_file), None, self.warmup_file)
            print len(warmup_data), 'data were loaded'
        self.replay_buffer['actor'].append(warmup_data, verbose=True)
        self.replay_buffer['critic'].append(warmup_data, verbose=True)
        self.warmup_size = self.replay_buffer['actor'].size_accum
        num_action_trained = 0
        # Pre-train the actor without the q-value gate, then alternate
        # critic/actor updates, checkpointing and logging periodically.
        for i in range(1000):
            self.train_action(self.sample_size, False)
        self.save_variables()
        for i in range(2000):
            self.train_qvalue(self.sample_size)
            num_action_trained += self.train_action(self.sample_size)
            if i % 20 == 0:
                self.save_variables()
            if i % 100 == 0:
                self.print_loss()
                print 'action_trained:', num_action_trained
                num_action_trained = 0
        self.save_variables()

    def convert_warmup_file_to_buffer_data(
            self, env, data=None, file_name=None):
        """Convert raw warmup tuples (or a pickled file of them) into
        replay-buffer entries [state, action_delta, [reward], state', world].
        """
        if data is None:
            f = open(file_name, 'r')
            data = pickle.load(f)
        size = len(data)
        tuples = []
        cnt = 0
        for d in data:
            state_skel_init = d[0]
            state_skel_term = d[1]
            action = d[2]
            # Replay the recorded skeleton states through env to obtain
            # network-ready states and the associated reward.
            env.reset()
            env.world.skel.set_states(state_skel_init)
            state_init = env.state()
            env.world.skel.set_states(state_skel_term)
            state_term = env.state()
            reward = env.goodness()
            action_delta = self.ac.delta(action)
            world_state = state_skel_term
            t = [state_init, action_delta, [reward], state_term, world_state]
            if basics.check_valid_data(t):
                tuples.append(t)
            cnt += 1
            if cnt % 5000 == 0:
                print cnt, ' data were processed'
        return tuples

    def run(self, max_steps=32, verbose=True):
        """Advance all slaves up to max_steps, storing transitions and
        training once the warmup phase is over."""
        if self.is_finished_trainning():
            return
        num_action_trained = 0
        self.init_step()
        for i in range(max_steps):
            # print '[', i, 'steps ] start',
            buffer_names, data = self.step()
            for j in range(len(data)):
                if data[j] is None:
                    # invalid transition: reset just that slave
                    self.env.reset(j)
                else:
                    self.replay_buffer[buffer_names[j]].append([data[j]])
            if not self.is_warming_up():
                self.train_qvalue(self.sample_size)
                num_action_trained += self.train_action(self.sample_size)
            # print 'end'
        # Print statistics
        if verbose:
            print '[ stats ]', ' warmup: ', self.is_warming_up(),
            for buffer_name in self.replay_buffer.keys():
                print '[', buffer_name,
                print self.replay_buffer[buffer_name].size,
                print self.replay_buffer[buffer_name].size_accum, ']',
            if not self.is_warming_up():
                self.print_loss()
            print ', action_trained:', num_action_trained

    def measure_controller_quality(self, max_steps):
        """Average-reward quality metric -- currently disabled (returns 0.0)."""
        # num_slave = self.env.num_slave
        # num_iter = len(self.target_pos_pool)/num_slave
        # cnt_wingbeat = 0
        # sum_reward = 0.0
        # for i in range(num_iter):
        # self.env.set_target_pos(self.target_pos_pool[num_slave*i:num_slave*(i+1)])
        # for j in range(max_steps):
        # buffer_names, data = self.step(force_critic=True)
        # for k in range(len(data)):
        # if data[k] is None:
        # continue
        # cnt_wingbeat += 1
        # sum_reward += data[k][2][0]
        # return sum_reward/cnt_wingbeat
        return 0.0

    def init_step(self):
        """Reset all slaves and seed them with world states sampled from
        the critic replay buffer."""
        self.env.reset()
        sample_idx = self.replay_buffer[
            'critic'].sample_idx(self.env.num_slave)
        data = self.sample('critic', sample_idx)
        self.env.set_world_state(data[4])

    def step(self):
        """Take one action per slave (with optional exploration noise).

        Returns (buffer_names, tuples); an entry of tuples is None when
        the generated transition failed validation.
        """
        state_inits = self.env.state()
        actions = self.get_actions(state_inits)
        # NOTE(review): this loop only rebinds the loop variable; it does
        # NOT clamp the list elements.  The per-index clamp below is what
        # actually takes effect, so this loop is dead code.
        for a in actions:
            a = self.ac.clamp(a)
        buffer_names = []
        for i in range(self.env.num_slave):
            buffer_name = 'critic'
            if self.determine_exploration():
                # exploratory transitions go to the actor buffer
                actions[i] += np.random.normal(
                    np.zeros(self.ac.dim),
                    self.get_exploration_noise() * np.ones(self.ac.dim))
                buffer_name = 'actor'
            actions[i] = self.ac.clamp(actions[i])
            buffer_names.append(buffer_name)
        rewards = self.env.step(actions)
        state_terms = self.env.state()
        world_states = self.env.get_world_state()
        tuples = []
        for i in range(self.env.num_slave):
            act = self.ac.delta(actions[i])
            t = [state_inits[i], act, [rewards[i]],
                 state_terms[i], world_states[i]]
            if basics.check_valid_data(t):
                tuples.append(t)
            else:
                tuples.append(None)
        return buffer_names, tuples

    def sample(self, buffer_name, idx):
        """Fetch transitions at idx from the named buffer as column-wise
        numpy arrays [state, action, reward, state', world_state]."""
        if not idx:
            raise Exception('DeepRL:sample', 'no index')
        data_state = []
        data_action = []
        data_reward = []
        data_state_prime = []
        data_world_state = []
        for i in idx:
            datum = self.replay_buffer[buffer_name].data[i]
            data_state.append(datum[0])
            data_action.append(datum[1])
            data_reward.append(datum[2])
            data_state_prime.append(datum[3])
            data_world_state.append(datum[4])
        return [
            np.array(data_state),
            np.array(data_action),
            np.array(data_reward),
            np.array(data_state_prime),
            np.array(data_world_state)]

    def compute_target_qvalue(self, reward, state_prime):
        """One-step TD target: r + discount * Q_target(s')."""
        qvalue_prime = self.nn.eval_qvalue([state_prime], True)
        target_qvalue = reward + self.discount_factor * qvalue_prime
        return target_qvalue

    def train_qvalue(self, sample_size, verbose=False):
        """Train the critic for train_iter steps on a critic-buffer batch."""
        sample_idx = self.replay_buffer['critic'].sample_idx(sample_size)
        data = self.sample('critic', sample_idx)
        if data:
            data_state = data[0]
            data_reward = data[2]
            data_state_prime = data[3]
            target_qvalue = self.compute_target_qvalue(
                data_reward, data_state_prime)
            for i in range(self.train_iter):
                self.nn.train_qvalue([data_state, target_qvalue])
            if verbose:
                print self.nn.loss_qvalue([data_state, target_qvalue])

    def train_action(self, sample_size, check_qvalue=True, verbose=False):
        """Train the actor on an actor-buffer batch.

        When check_qvalue is True, only samples whose TD target exceeds
        the current Q-value by the "knoll" margin are kept.  Returns the
        number of samples actually trained on.
        """
        sample_idx = self.replay_buffer['actor'].sample_idx(sample_size)
        data = self.sample('actor', sample_idx)
        if data:
            data_state = data[0]
            data_action = data[1]
            data_reward = data[2]
            data_state_prime = data[3]
            if check_qvalue:
                train_state = []
                train_action = []
                qvalue = self.nn.eval_qvalue([data_state], True)
                target_qvalue = self.compute_target_qvalue(
                    data_reward, data_state_prime)
                for i in xrange(len(qvalue)):
                    if target_qvalue[i][0] > qvalue[i][0] + self.get_qvalue_knoll():
                        train_state.append(data_state[i])
                        train_action.append(data_action[i])
                data_state = train_state
                data_action = train_action
            if len(data_state) > 0:
                for i in range(self.train_iter):
                    self.nn.train_action([data_state, data_action])
                if verbose:
                    print self.nn.loss_action([data_state, data_action])
            return len(data_state)
        return 0

    def train(self):
        """One combined critic + actor training round."""
        self.train_qvalue(self.sample_size)
        self.train_action(self.sample_size)

    def get_action(self, state):
        """Actor output (from the target copy) for a single state."""
        val = self.nn.eval_action([[state]], True)
        return val[0]

    def get_actions(self, states):
        """Actor outputs (from the target copy) for a batch of states."""
        val = self.nn.eval_action([states], True)
        return val

    def get_qvalue(self, state):
        """Scalar Q-value (from the target copy) for a single state."""
        val = self.nn.eval_qvalue([[state]], True)
        return val[0][0]

    def loss_qvalue(self, sample_size=100, buffer_name='critic'):
        """Critic loss on a fresh sample from the given buffer."""
        sample_idx = self.replay_buffer[buffer_name].sample_idx(sample_size)
        data = self.sample(buffer_name, sample_idx)
        q = 0.0
        if data:
            data_state = data[0]
            data_reward = data[2]
            data_state_prime = data[3]
            target_qvalue = self.compute_target_qvalue(
                data_reward, data_state_prime)
            q = self.nn.loss_qvalue([data_state, target_qvalue])
        return q

    def loss_action(self, sample_size=100, buffer_name='critic'):
        """Actor loss on a fresh sample from the given buffer."""
        sample_idx = self.replay_buffer[buffer_name].sample_idx(sample_size)
        data = self.sample(buffer_name, sample_idx)
        if data:
            data_state = data[0]
            data_action = data[1]
            return self.nn.loss_action([data_state, data_action])
        else:
            return 0.0

    def print_loss(self, sample_size=100):
        """Print current critic and actor losses on fresh samples."""
        q = self.loss_qvalue(sample_size)
        a = self.loss_action(sample_size)
        print 'Loss values: ', 'qvalue:', q, 'action:', a,

    def save_variables(self):
        """Checkpoint the network variables."""
        self.nn.save_variables()
# Module-level singletons, constructed lazily by example_init() from the
# first render callback (construction needs a live GL context).
myEnvi = None        # simulation environment (Env)
myAction = None      # action helper (Action)
myNN = None          # neural-network wrapper (NN)
myEnviMaster = None  # multi-process environment master
myDeepRL = None      # deep RL driver (DeepRL_Multicore)
myEye = None         # eye/camera renderer
def example_init():
    """Construct the global environment, network, and RL driver objects.

    Invoked once from render_callback() after the GL context exists.
    """
    global myEnvi, myDeepRL, myAction, myNN, myEnviMaster, myEye
    myEnvi = Env(dt, skel_file)
    myAction = Action(myEnvi.skel.controller.actuable_dofs)
    myNN = NN('net_turtle_torque')
    # Network input size is the state dimension; output is the action dim.
    myNN.initialize(
        [len(myEnvi.state()), myAction.dim],
        ckpt_load_dir)
    # The master fans simulation out to up to max_client slave processes.
    myEnviMaster = En_Master_Custom(
        max_client,
        gen_env,
        [dt, skel_file],
        Env_Slave_Custom)
    myDeepRL = DeepRL_Multicore(myEnviMaster, myNN, myAction, warmup_file)
    myEye = eye.Eye(render_func=myEnvi.eye_render, setup=True)
def render_callback():
    """Per-frame GL callback: lazy init, draw the scene, and optionally train."""
    global gl_init, myEnvi, myDeepRL
    if not gl_init:
        # Deferred construction: GL-dependent objects need a live context.
        gl_init = True
        example_init()
    if myEnvi is None or myDeepRL is None:
        return
    gl_render.render_ground(color=[0.3, 0.3, 0.3], axis='x')
    gl_render.render_ground(color=[0.3, 0.3, 0.3], axis='y')
    gl_render.render_ground(color=[0.3, 0.3, 0.3], axis='z')
    myEnvi.render()
    if flag['Train']:
        global cnt_target_update
        myDeepRL.run(max_steps)
        if not myDeepRL.is_finished_trainning():
            cnt_target_update += 1
            # Periodically checkpoint and report controller quality.
            if cnt_target_update >= max_target_update:
                myDeepRL.save_variables()
                cnt_target_update = 0
                print '------Target Network is updated------',
                print 'avg_reward:', myDeepRL.measure_controller_quality(max_steps)
def keyboard_callback(key):
if myEnvi is None or myDeepRL is None:
return
if key == 'r':
print 'Key[r]: reset environment'
global elapsed_time
myEnvi.reset()
elapsed_time = 0.0
elif key == 't':
print myEnvi.state()
elif key == 'p':
elpased = 0.0
while True:
myEnvi.step_forward()
elpased += dt
if elpased >= 0.03333333:
print myEnvi.state()
break
elif key == '[':
elpased = 0.0
while True:
state = myEnvi.state()
reward = myEnvi.goodness()
action = myAction.random(
[myDeepRL.get_random_noise()] * myAction.dim)
myEnvi.step(action)
elpased += dt
if elpased >= 0.03333333:
print 'S:', state, 'A:', action, 'R:', reward
break
elif key == ' ':
elapsed = 0.0
while True:
state = myEnvi.state()
action = myDeepRL.get_action(state)
action = myAction.clamp(action)
qvalue | |
array.
This overrides the _generic NDArray version
"""
# Test for simple case first
if isinstance(arr, NumArray):
if (arr.nelements() == 0 and self.nelements() == 0):
return
if (arr._type == self._type and
self._shape == arr._shape and
arr._byteorder == self._byteorder and
_gen.product(arr._strides) != 0 and
arr.isaligned() and self.isaligned()):
name = 'copy'+`self._itemsize`+'bytes'
cfunc = ( _bytes.functionDict.get(name) or
_bytes.functionDict["copyNbytes"])
cfunc(self._shape, arr._data, arr._byteoffset, arr._strides,
self._data, self._byteoffset, self._strides,
self._itemsize)
return
else:
arr = array(arr)
if self.rank == 0 and arr.nelements() == 1:
barr = arr.view()
barr._shape = ()
else:
barr = self._broadcast(arr)
if barr is None:
raise ValueError('array sizes must be consistent')
ufunc._copyFromAndConvert(barr, self)
def view(self):
    """Returns a new array object which refers to the same data as the
    original array.  The new array object can be manipulated (reshaped,
    restrided, new attributes, etc.) without affecting the original array.
    Modifications of the array data *do* affect the original array.
    """
    result = _gen.NDArray.view(self)
    # Carry over the numarray-specific attributes the generic view
    # does not know about.
    result._type = self._type
    result._byteorder = self._byteorder
    return result
def __getstate__(self):
    """returns state of NumArray for pickling."""
    state = _gen.NDArray.__getstate__(self)
    # The type object itself is not pickled; store its name and resolve
    # it again in __setstate__.
    state["_byteorder"] = self._byteorder
    state["_type"] = self._type.name
    return state
def __setstate__(self, state):
    """sets state of NumArray after unpickling."""
    _gen.NDArray.__setstate__(self, state)
    # _type was pickled by name; resolve it back into a type object.
    self._type = _nt.getType(state["_type"])
    self._byteorder = state["_byteorder"]
def _put(self, indices, values, **keywds):
    """Scatter *values* into self at *indices* (delegates to ufunc._put)."""
    ufunc._put(self, indices, values, **keywds)

def _take(self, indices, **keywds):
    """Gather and return the elements at *indices* (delegates to ufunc._take)."""
    return ufunc._take(self, indices, **keywds)
def getreal(self):
    """Return the real component of the array.

    Complex arrays yield a zero-copy view onto the real halves of the
    items; floating arrays return self; other types return a Float64 copy.
    """
    if isinstance(self._type, _nt.ComplexType):
        t = _realtype(self._type)
        # Share the buffer: a complex item is laid out as (real, imag),
        # so a view with the real element type at the same byte offset
        # selects the real parts.  Strides are copied to keep spacing.
        arr = NumArray(self._shape, t, buffer=self._data,
                       byteoffset=self._byteoffset,
                       bytestride=self._bytestride,
                       byteorder=self._byteorder)
        arr._strides = self._strides[:]
        return arr
    elif isinstance(self._type, _nt.FloatingType):
        # Already real-valued floating point: the array itself.
        return self
    else:
        # Integer/boolean arrays: converted Float64 copy.
        return self.astype(_nt.Float64)
def setreal(self, value):
    """Assign *value* to the real component; only valid for floating or
    complex arrays."""
    supported = isinstance(self._type, (_nt.ComplexType, _nt.FloatingType))
    if not supported:
        raise TypeError("Can't setreal() on a non-floating-point array")
    self.getreal()[:] = value
# NOTE(review): "real" is re-bound further down with a different doc
# string; this earlier binding is overwritten at class-creation time.
real = property(getreal, setreal,
                doc="real component of a non-complex numarray")
def getimag(self):
    """Return the imaginary component of the array.

    Complex arrays yield a zero-copy view onto the imaginary halves of
    the items; all other types return a fresh Float64 array of zeros.
    """
    if isinstance(self._type, _nt.ComplexType):
        t = _realtype(self._type)
        # Offset by one real-sized element to land on the imaginary half
        # of each (real, imag) pair; buffer and strides are shared.
        arr = NumArray(self._shape, t, buffer=self._data,
                       byteoffset=self._byteoffset+t.bytes,
                       bytestride=self._bytestride,
                       byteorder=self._byteorder)
        arr._strides = self._strides[:]
        return arr
    else:
        # new() only allocates; fill explicitly with zeros.
        zeros = self.new(_nt.Float64)
        zeros[:] = 0.0
        return zeros
def setimag(self, value):
    """Set the imaginary component of a complex array.

    Raises:
        TypeError: if the array is not complex; only complex arrays have
            imaginary storage to assign into.
    """
    if isinstance(self._type, _nt.ComplexType):
        self.getimag()[:] = value
    else:
        # Fixed typo in the error message ("non-comlex" -> "non-complex").
        raise TypeError("Can't set imaginary component of a non-complex type")
imag = property(getimag, setimag,
                doc="imaginary component of complex array")
# This second binding of "real" supersedes the earlier one above.
real = property(getreal, setreal,
                doc="real component of complex array")
# Long-form aliases kept for backward compatibility.
setimaginary = setimag
getimaginary = getimag
imaginary = imag
def conjugate(self):
    """Returns the conjugate a - bj of complex array a + bj."""
    a = self.copy()
    # Negate the imaginary component in place on the copy; the real
    # part is left untouched.
    ufunc.minus(a.getimag(), a.getimag())
    return a
def togglebyteorder(self):
    """reverses the state of the _byteorder attribute: little <-> big."""
    opposite = {"little": "big", "big": "little"}
    self._byteorder = opposite[self._byteorder]
def byteswap(self):
    """Byteswap data in place, leaving the _byteorder attribute untouched.
    """
    if self._itemsize == 1:
        # single-byte items: swapping is a no-op
        return
    if isinstance(self._type, _nt.ComplexType):
        # complex items must be swapped per component, so dispatch on
        # the type name rather than the raw item size
        fname = "byteswap" + self._type.name
    else:
        fname = "byteswap"+str(self._itemsize)+"bytes"
    cfunc = _bytes.functionDict[fname]
    # In place: source and destination share buffer, offset and strides.
    cfunc(self._shape,
          self._data, self._byteoffset, self._strides,
          self._data, self._byteoffset, self._strides)

_byteswap = byteswap # alias to keep old code working.
def byteswapped(self):
    """returns a byteswapped copy of self."""
    duplicate = self.copy()
    duplicate._byteswap()
    return duplicate
def sinfo(self):
    """Return a string describing the key attributes of the numarray."""
    extras = [
        "byteorder: " + repr(self._byteorder) + "\n",
        "byteswap: " + repr(self.isbyteswapped()) + "\n",
        "type: " + repr(self._type) + "\n",
    ]
    return _gen.NDArray.sinfo(self) + "".join(extras)
def astype(self, type=None, typecode=None, dtype=None):
    """Return a copy of the array converted to the given type"""
    # Any of the three keyword spellings may name the target type.
    type = _nt._typeFromKeywords(type, typecode, dtype)
    if type is None:
        type = self._type
    if type == self._type:
        # always return a copy even if type is same
        return self.copy()
    if type._conv:
        retarr = self.__class__(buffer=None, shape=self._shape, type=type)
        if retarr.nelements() == 0:
            return retarr
        if self.is_c_array():
            # Fast path: contiguous, aligned, native-order source can be
            # converted with a single C conversion function; FP errors
            # are checked around the call.
            _ufunc.CheckFPErrors()
            cfunc = self._type._conv.astype[type.name]
            cfunc(self.nelements(), 1, 1,
                  ((self._data, self._byteoffset), (retarr._data, 0)))
            errorstatus = _ufunc.CheckFPErrors()
            if errorstatus:
                ufunc.handleError(errorstatus, " during type conversion")
        else:
            # Slow path: copy-and-convert handles strides and byteswap.
            ufunc._copyFromAndConvert(self, retarr)
    elif type.fromtype:
        # No direct conversion table; let the target type construct
        # itself from this array.
        retarr = type.fromtype(self)
    else:
        raise TypeError("Don't know how to convert from %s to %s" %
                        (self._type.name, type.name))
    return retarr
def __tonumtype__(self, type):
    """__tonumtype__ supports C-API NA_setFromPythonScalar permitting a rank-0
    array to be used in lieu of a numerical scalar value."""
    if self.rank != 0:
        raise ValueError("Can't use non-rank-0 array as a scalar.")
    # Convert only when the requested type differs from our own.
    source = self if type is self.type() else self.astype(type)
    return source[()]
def is_c_array(self):
    """returns 1 iff an array is c-contiguous, aligned, and not
    byteswapped."""
    # Same short-circuit order as the original and-chain.
    if not self.iscontiguous():
        return False
    if not self.isaligned():
        return False
    return not self.isbyteswapped()
def is_f_array(self):
    """returns 1 iff an array is fortran-contiguous, aligned, and not
    byteswapped."""
    # Normalize the result to a bool with "!= 0", matching is_c_array;
    # previously this could leak a raw int/flag from the predicates.
    return (self.is_fortran_contiguous() and self.isaligned() and not
            self.isbyteswapped()) != 0
def new(self, type=None):
    """Return a new array of given type with same shape as this array
    Note this only creates the array; it does not copy the data.
    """
    target_type = self._type if type is None else type
    return self.__class__(shape=self._shape, type=target_type)
def type(self):
    """Return the type object for the array"""
    return self._type
def copy(self):
    """Returns a native byte order copy of the array."""
    c = _gen.NDArray.copy(self)
    c._byteorder = self._byteorder
    c._type = self._type
    if self.isbyteswapped():
        # Physically swap the copied bytes, then flip the recorded
        # byteorder so the copy ends up in native order.
        c.byteswap()
        c.togglebyteorder()
    return c
def __str__(self):
    """Informal string representation (delegates to array_str)."""
    return array_str(self)
def __repr__(self):
    """Formal string representation, with a debug fallback for arrays
    that cannot be formatted (e.g. partially constructed ones)."""
    # The previous version returned unconditionally before the
    # try/except, leaving the debug fallback unreachable dead code;
    # restore the fallback and narrow the bare except.
    try:
        return array_repr(self)
    except Exception:
        return "<<DEBUG NumArray.__repr__(): " + self.sinfo().rstrip() + ">>"
if _PROTOTYPE:
    # Scalar-conversion protocol: rank-0 and single-element arrays may be
    # cast to Python scalars.  Fixed "self.rank is 0" identity tests
    # (which relied on CPython small-int caching) to "== 0", and use the
    # call form of raise, valid in both Python 2 and 3.
    def __int__(self):
        """Convert a rank-0 or shape-(1,) array to a Python int."""
        if self.rank == 0:
            return int(self[()])
        elif self._shape == (1,):
            return int(self[0])
        else:
            # Message now matches behavior: shape (1,) is accepted too.
            raise TypeError("Only rank-0 or shape=(1,) numarray can be cast to integers.")

    def __float__(self):
        """Convert a rank-0 or shape-(1,) array to a Python float."""
        if self.rank == 0:
            return float(self[()])
        elif self._shape == (1,):
            return float(self[0])
        else:
            raise TypeError("Only rank-0 or shape=(1,) numarray can be cast to floats.")

    def __complex__(self):
        """Convert a rank-0 or shape-(1,) array to a Python complex."""
        if self.rank == 0:
            return complex(self[()])
        elif self._shape == (1,):
            return complex(self[0])
        else:
            raise TypeError("Only rank-0 or shape=(1,) numarray can be cast to complex.")
# Unary operator protocol, delegated to the matching ufuncs.
def __abs__(self): return ufunc.abs(self)             # abs(a)
def __neg__(self): return ufunc.minus(self)           # -a
def __invert__(self): return ufunc.bitwise_not(self)  # ~a (bitwise not)
def __pos__(self): return self                        # +a is a no-op
# Binary arithmetic operators (+, -, *).  Each forward op defers to the
# operand's reflected method when the operand declares a higher
# op_priority (UsesOpPriority protocol); otherwise it applies the
# matching ufunc.  Reflected ops additionally re-tag the result's class
# so that subclass instances come back as the subclass.
def __add__(self, operand):
    if (isinstance(operand, UsesOpPriority) and
            self.op_priority < operand.op_priority):
        return operand.__radd__(self)
    else:
        return ufunc.add(self, operand)

def __radd__(self, operand):
    if (isinstance(operand, UsesOpPriority) and
            self.op_priority < operand.op_priority):
        return operand.__add__(self)
    else:
        r = ufunc.add(operand, self)
        if isinstance(r, NumArray):
            r.__class__ = self.__class__
        return r

def __sub__(self, operand):
    if (isinstance(operand, UsesOpPriority) and
            self.op_priority < operand.op_priority):
        return operand.__rsub__(self)
    else:
        return ufunc.subtract(self, operand)

def __rsub__(self, operand):
    if (isinstance(operand, UsesOpPriority) and
            self.op_priority < operand.op_priority):
        return operand.__sub__(self)
    else:
        r = ufunc.subtract(operand, self)
        if isinstance(r, NumArray):
            r.__class__ = self.__class__
        return r

def __mul__(self, operand):
    if (isinstance(operand, UsesOpPriority) and
            self.op_priority < operand.op_priority):
        return operand.__rmul__(self)
    else:
        return ufunc.multiply(self, operand)

def __rmul__(self, operand):
    if (isinstance(operand, UsesOpPriority) and
            self.op_priority < operand.op_priority):
        return operand.__mul__(self)
    else:
        r = ufunc.multiply(operand, self)
        if isinstance(r, NumArray):
            r.__class__ = self.__class__
        return r
def __div__(self, operand):
if (isinstance(operand, UsesOpPriority) and
self.op_priority < operand.op_priority):
return operand.__rdiv__(self)
else:
return ufunc.divide(self, operand)
def __truediv__(self, operand):
if (isinstance(operand, UsesOpPriority) and
self.op_priority < operand.op_priority):
return operand.__rtruediv__(self)
else:
return ufunc.true_divide(self, operand)
def __floordiv__(self, operand):
    """Floor division; defers to a higher-priority operand."""
    if isinstance(operand, UsesOpPriority) and \
       self.op_priority < operand.op_priority:
        return operand.__rfloordiv__(self)
    return ufunc.floor_divide(self, operand)
def __rdiv__(self, operand):
    """Reflected classic division (operand / self); result keeps self's class."""
    if isinstance(operand, UsesOpPriority) and \
       self.op_priority < operand.op_priority:
        return operand.__div__(self)
    result = ufunc.divide(operand, self)
    if isinstance(result, NumArray):
        result.__class__ = self.__class__
    return result
def __rtruediv__(self, operand):
    """Reflected true division (operand / self); result keeps self's class."""
    if isinstance(operand, UsesOpPriority) and \
       self.op_priority < operand.op_priority:
        return operand.__truediv__(self)
    result = ufunc.true_divide(operand, self)
    if isinstance(result, NumArray):
        result.__class__ = self.__class__
    return result
def __rfloordiv__(self, operand):
    """Reflected floor division (operand // self); result keeps self's class."""
    if isinstance(operand, UsesOpPriority) and \
       self.op_priority < operand.op_priority:
        return operand.__floordiv__(self)
    result = ufunc.floor_divide(operand, self)
    if isinstance(result, NumArray):
        result.__class__ = self.__class__
    return result
def __mod__(self, operand):
    """Elementwise remainder; defers to a higher-priority operand."""
    if isinstance(operand, UsesOpPriority) and \
       self.op_priority < operand.op_priority:
        return operand.__rmod__(self)
    return ufunc.remainder(self, operand)
def __rmod__(self, operand):
    """Reflected remainder (operand % self); result keeps self's class."""
    if isinstance(operand, UsesOpPriority) and \
       self.op_priority < operand.op_priority:
        return operand.__mod__(self)
    result = ufunc.remainder(operand, self)
    if isinstance(result, NumArray):
        result.__class__ = self.__class__
    return result
def __divmod__(self, operand):
    """returns the tuple (self/operand, self%operand)."""
    if isinstance(operand, UsesOpPriority) and \
       self.op_priority < operand.op_priority:
        return operand.__rdivmod__(self)
    return ufunc.divide_remainder(self, operand)
def __rdivmod__(self, operand):
    """returns the tuple (operand/self, operand%self)."""
    if isinstance(operand, UsesOpPriority) and \
       self.op_priority < operand.op_priority:
        return operand.__divmod__(self)
    result = ufunc.divide_remainder(operand, self)
    if isinstance(result, NumArray):
        result.__class__ = self.__class__
    return result
def __pow__(self, operand):
    """Elementwise exponentiation; defers to a higher-priority operand."""
    if isinstance(operand, UsesOpPriority) and \
       self.op_priority < operand.op_priority:
        return operand.__rpow__(self)
    return ufunc.power(self, operand)
def __rpow__(self, operand):
    """Reflected exponentiation (operand ** self); result keeps self's class."""
    if isinstance(operand, UsesOpPriority) and \
       self.op_priority < operand.op_priority:
        return operand.__pow__(self)
    result = ufunc.power(operand, self)
    if isinstance(result, NumArray):
        result.__class__ = self.__class__
    return result
def __and__(self, operand):
    """Elementwise bitwise AND; defers to a higher-priority operand."""
    if isinstance(operand, UsesOpPriority) and \
       self.op_priority < operand.op_priority:
        return operand.__rand__(self)
    return ufunc.bitwise_and(self, operand)
def __rand__(self, operand):
| |
from data import *
from utils.augmentations import SSDAugmentation, BaseTransform
from utils.functions import MovingAverage, SavePath
from utils import timer
from layers.modules import MultiBoxLoss
from yolact import Yolact
import os
import sys
import time
import math
from pathlib import Path
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.utils.data as data
import numpy as np
import argparse
import datetime
from PIL import Image
# Oof
import eval as eval_script
def str2bool(v):
    """Map a command-line string to a boolean (yes/true/t/1, any case, is True)."""
    truthy = ("yes", "true", "t", "1")
    return v.lower() in truthy
# Command-line interface. Hyperparameters default to None so they can fall
# back to the active config (see replace() below).
parser = argparse.ArgumentParser(
    description='Yolact Training Script')
parser.add_argument('--batch_size', default=1, type=int,
                    help='Batch size for training')
parser.add_argument('--resume', default=None, type=str,
                    help='Checkpoint state_dict file to resume training from. If this is "interrupt"'\
                         ', the model will resume training from the interrupt file.')
parser.add_argument('--start_iter', default=0, type=int,
                    help='Resume training at this iter. If this is -1, the iteration will be'\
                         'determined from the file name.')
parser.add_argument('--num_workers', default=4, type=int,
                    help='Number of workers used in dataloading')
parser.add_argument('--cuda', default=True, type=str2bool,
                    help='Use CUDA to train model')
parser.add_argument('--lr', '--learning_rate', default=None, type=float,
                    help='Initial learning rate. Leave as None to read this from the config.')
parser.add_argument('--momentum', default=None, type=float,
                    help='Momentum for SGD. Leave as None to read this from the config.')
parser.add_argument('--decay', '--weight_decay', default=None, type=float,
                    help='Weight decay for SGD. Leave as None to read this from the config.')
parser.add_argument('--gamma', default=None, type=float,
                    help='For each lr step, what to multiply the lr by. Leave as None to read this from the config.')
parser.add_argument('--save_folder', default='weights/',
                    help='Directory for saving checkpoint models')
parser.add_argument('--config', default=None,
                    help='The config object to use.')
parser.add_argument('--save_interval', default=10000, type=int,
                    help='The number of iterations between saving the model.')
parser.add_argument('--validation_size', default=5000, type=int,
                    help='The number of images to use for validation.')
parser.add_argument('--validation_epoch', default=2, type=int,
                    help='Output validation information every n iterations. If -1, do no validation.')
parser.add_argument('--keep_latest', dest='keep_latest', action='store_true',
                    help='Only keep the latest checkpoint instead of each one.')
parser.add_argument('--keep_latest_interval', default=100000, type=int,
                    help='When --keep_latest is on, don\'t delete the latest file at these intervals. This should be a multiple of save_interval or 0.')
parser.add_argument('--dataset', default=None, type=str,
                    help='If specified, override the dataset specified in the config with this one (example: coco2017_dataset).')
parser.set_defaults(keep_latest=False)
args = parser.parse_args()

# Apply config/dataset overrides before anything else reads the global cfg.
if args.config is not None:
    set_cfg(args.config)

if args.dataset is not None:
    set_dataset(args.dataset)
# Update training parameters from the config if necessary
def replace(name):
    """Fall back to the config's value for any arg the user left as None.

    Fix: compare with `is None` (PEP 8) instead of `== None`; behavior is the
    same for the float/None values used here but avoids __eq__ surprises.
    """
    if getattr(args, name) is None:
        setattr(args, name, getattr(cfg, name))
# Pull any hyperparameters still set to None from the active config.
replace('lr')
replace('decay')
replace('gamma')
replace('momentum')

# One moving loss average is tracked per loss type; this also forms the print order.
loss_types = ['B', 'C', 'M', 'P', 'D', 'E', 'S']

# Choose the default tensor type up front so all later allocations land on
# the right device.
if torch.cuda.is_available():
    if args.cuda:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    if not args.cuda:
        print("WARNING: It looks like you have a CUDA device, but aren't " +
              "using CUDA.\nRun with --cuda for optimal training speed.")
        torch.set_default_tensor_type('torch.FloatTensor')
else:
    torch.set_default_tensor_type('torch.FloatTensor')
class ScatterWrapper:
    """ Input is any number of lists. This will preserve them through a dataparallel scatter. """

    def __init__(self, *lists):
        # Scatter only round-trips lists; warn about anything else.
        for item in lists:
            if not isinstance(item, list):
                print('Warning: ScatterWrapper got input of non-list type.')
        self.args = lists
        self.batch_size = len(lists[0])

    def make_mask(self):
        """Index tensor scattered alongside the batch so get_args can recover order."""
        out = torch.Tensor(list(range(self.batch_size))).long()
        return out.cuda() if args.cuda else out

    def get_args(self, mask):
        """Select the wrapped lists' elements named by `mask`, moving tensors to its device."""
        device = mask.device
        indices = [int(i) for i in mask]
        gathered = []
        for arg in self.args:
            selected = []
            for idx in indices:
                item = arg[idx]
                if isinstance(item, torch.Tensor):
                    item = item.to(device)
                selected.append(item)
            gathered.append(selected)
        return gathered
def train():
    """Run the full YOLACT training loop.

    Builds datasets, network, optimizer and criterion from the global
    `args`/`cfg`, then iterates until cfg.max_iter, periodically saving
    checkpoints and (optionally) running validation. Ctrl+C saves an
    '*_interrupt' checkpoint before exiting.

    Fixes vs. the previous revision:
      * the loss-average reset loop iterated the dict's KEYS (strings),
        which would raise AttributeError the first time a delayed config
        setting fired; it now iterates the MovingAverage values.
      * removed the unused loc_loss/conf_loss locals and a dead
        triple-quoted alternate-dataset block.
    """
    if not os.path.exists(args.save_folder):
        os.mkdir(args.save_folder)

    dataset = COCODetection(image_path=cfg.dataset.train_images,
                            info_file=cfg.dataset.train_info,
                            transform=SSDAugmentation(MEANS))

    if args.validation_epoch > 0:
        setup_eval()
        val_dataset = COCODetection(image_path=cfg.dataset.valid_images,
                                    info_file=cfg.dataset.valid_info,
                                    transform=BaseTransform(MEANS))

    # Parallel wraps the underlying module, but when saving and loading we don't want that
    yolact_net = Yolact()
    net = yolact_net
    net.train()

    # I don't use the timer during training (I use a different timing method).
    # Apparently there's a race condition with multiple GPUs.
    timer.disable_all()

    # Both of these can set args.resume to None, so do them before the check
    if args.resume == 'interrupt':
        args.resume = SavePath.get_interrupt(args.save_folder)
    elif args.resume == 'latest':
        args.resume = SavePath.get_latest(args.save_folder, cfg.name)

    if args.resume is not None:
        print('Resuming training, loading {}...'.format(args.resume))
        yolact_net.load_weights(args.resume)

        if args.start_iter == -1:
            args.start_iter = SavePath.from_str(args.resume).iteration
    else:
        print('Initializing weights...')
        yolact_net.init_weights(backbone_path=args.save_folder + cfg.backbone.path)

    optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum,
                          weight_decay=args.decay)
    criterion = MultiBoxLoss(num_classes=cfg.num_classes,
                             pos_threshold=cfg.positive_iou_threshold,
                             neg_threshold=cfg.negative_iou_threshold,
                             negpos_ratio=3)

    if args.cuda:
        cudnn.benchmark = True
        net = nn.DataParallel(net).cuda()
        criterion = nn.DataParallel(criterion).cuda()

    iteration = max(args.start_iter, 0)
    last_time = time.time()

    epoch_size = len(dataset) // args.batch_size
    num_epochs = math.ceil(cfg.max_iter / epoch_size)

    # Which learning rate adjustment step are we on? lr' = lr * gamma ^ step_index
    step_index = 0

    data_loader = data.DataLoader(dataset, args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True, collate_fn=detection_collate,
                                  pin_memory=True)

    save_path = lambda epoch, iteration: SavePath(cfg.name, epoch, iteration).get_path(root=args.save_folder)
    time_avg = MovingAverage()

    global loss_types # Forms the print order
    loss_avgs = { k: MovingAverage(100) for k in loss_types }

    print('Begin training!')
    print()
    # try-except so you can use ctrl+c to save early and stop training
    try:
        for epoch in range(num_epochs):
            # Resume from start_iter
            if (epoch+1)*epoch_size < iteration:
                continue

            for datum in data_loader:
                # Stop if we've reached an epoch if we're resuming from start_iter
                if iteration == (epoch+1)*epoch_size:
                    break

                # Stop at the configured number of iterations even if mid-epoch
                if iteration == cfg.max_iter:
                    break

                # Change a config setting if we've reached the specified iteration
                changed = False
                for change in cfg.delayed_settings:
                    if iteration >= change[0]:
                        changed = True
                        cfg.replace(change[1])

                        # Reset the loss averages because things might have changed.
                        # BUGFIX: iterate the MovingAverage values, not the dict
                        # keys (strings have no reset()).
                        for avg in loss_avgs.values():
                            avg.reset()

                # If a config setting was changed, remove it from the list so we don't keep checking
                if changed:
                    cfg.delayed_settings = [x for x in cfg.delayed_settings if x[0] > iteration]

                # Warm up by linearly interpolating the learning rate from some smaller value
                if cfg.lr_warmup_until > 0 and iteration <= cfg.lr_warmup_until:
                    set_lr(optimizer, (args.lr - cfg.lr_warmup_init) * (iteration / cfg.lr_warmup_until) + cfg.lr_warmup_init)

                # Adjust the learning rate at the given iterations, but also if we resume from past that iteration
                while step_index < len(cfg.lr_steps) and iteration >= cfg.lr_steps[step_index]:
                    step_index += 1
                    set_lr(optimizer, args.lr * (args.gamma ** step_index))

                # Load training data
                # Note, for training on multiple gpus this will use the custom replicate and gather I wrote up there
                images, targets, masks, num_crowds = prepare_data(datum)

                # Forward Pass
                out = net(images)

                # Compute Loss
                optimizer.zero_grad()

                wrapper = ScatterWrapper(targets, masks, num_crowds)
                losses = criterion(out, wrapper, wrapper.make_mask())

                losses = { k: v.mean() for k,v in losses.items() } # Mean here because Dataparallel
                loss = sum([losses[k] for k in losses])

                # Backprop
                loss.backward() # Do this to free up vram even if loss is not finite
                if torch.isfinite(loss).item():
                    optimizer.step()

                # Add the loss to the moving average for bookkeeping
                for k in losses:
                    loss_avgs[k].add(losses[k].item())

                cur_time = time.time()
                elapsed = cur_time - last_time
                last_time = cur_time

                # Exclude graph setup from the timing information
                if iteration != args.start_iter:
                    time_avg.add(elapsed)

                if iteration % 10 == 0:
                    eta_str = str(datetime.timedelta(seconds=(cfg.max_iter-iteration) * time_avg.get_avg())).split('.')[0]

                    total = sum([loss_avgs[k].get_avg() for k in losses])
                    loss_labels = sum([[k, loss_avgs[k].get_avg()] for k in loss_types if k in losses], [])

                    print(('[%3d] %7d ||' + (' %s: %.3f |' * len(losses)) + ' T: %.3f || ETA: %s || timer: %.3f')
                          % tuple([epoch, iteration] + loss_labels + [total, eta_str, elapsed]), flush=True)

                iteration += 1

                if iteration % args.save_interval == 0 and iteration != args.start_iter:
                    if args.keep_latest:
                        latest = SavePath.get_latest(args.save_folder, cfg.name)

                    print('Saving state, iter:', iteration)
                    yolact_net.save_weights(save_path(epoch, iteration))

                    if args.keep_latest and latest is not None:
                        if args.keep_latest_interval <= 0 or iteration % args.keep_latest_interval != args.save_interval:
                            print('Deleting old save...')
                            os.remove(latest)

            # This is done per epoch
            if args.validation_epoch > 0:
                if epoch % args.validation_epoch == 0 and epoch > 0:
                    compute_validation_map(yolact_net, val_dataset)
    except KeyboardInterrupt:
        print('Stopping early. Saving network...')

        # Delete previous copy of the interrupted network so we don't spam the weights folder
        SavePath.remove_interrupt(args.save_folder)

        yolact_net.save_weights(save_path(epoch, repr(iteration) + '_interrupt'))
        exit()

    yolact_net.save_weights(save_path(epoch, iteration))
def set_lr(optimizer, new_lr):
    """Assign `new_lr` to every parameter group of the optimizer."""
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def prepare_data(datum):
    """Unpack a collated batch and wrap its tensors in Variables.

    Moves everything onto the GPU when --cuda is enabled; gradients are
    never needed for inputs/targets, so requires_grad is always False.
    """
    images, (targets, masks, num_crowds) = datum

    if args.cuda:
        wrap = lambda t: Variable(t.cuda(), requires_grad=False)
    else:
        wrap = lambda t: Variable(t, requires_grad=False)

    images = wrap(images)
    targets = [wrap(ann) for ann in targets]
    masks = [wrap(mask) for mask in masks]

    return images, targets, masks, num_crowds
def compute_validation_loss(net, data_loader, criterion):
global loss_types
with torch.no_grad():
losses = {}
| |
machine):
super(MockResult, self).__init__(mylogger, label, logging_level, machine)
def FindFilesInResultsDir(self, find_args):
    """Stub override: behave as though no files were found."""
    del find_args  # accepted for signature compatibility, unused
    return ''
# pylint: disable=arguments-differ
def GetKeyvals(self, temp=False):
    """Stub override: always return the canned module-level keyvals."""
    # `temp` is accepted only for signature compatibility; it has no effect.
    if temp:
        pass
    return keyvals
class ResultTest(unittest.TestCase):
"""Result test class."""
def __init__(self, *args, **kwargs):
    """Initialize per-test bookkeeping flags and the shared mock objects."""
    super(ResultTest, self).__init__(*args, **kwargs)
    # Flags flipped by fake methods so individual tests can verify call paths.
    self.callFakeProcessResults = False
    self.fakeCacheReturnResult = None
    self.callGetResultsDir = False
    self.callProcessResults = False
    self.callGetPerfReportFiles = False
    self.kv_dict = None
    self.tmpdir = ''
    self.callGetNewKeyvals = False
    self.callGetResultsFile = False
    self.callGetPerfDataFiles = False
    self.callGetTurbostatFile = False
    self.callGetCpustatsFile = False
    self.callGetTopFile = False
    self.callGetWaitTimeFile = False
    self.args = None
    self.callGatherPerfResults = False
    # Shared mocks/label reused by setUp and the test methods below.
    self.mock_logger = mock.Mock(spec=logger.Logger)
    self.mock_cmd_exec = mock.Mock(spec=command_executer.CommandExecuter)
    self.mock_label = MockLabel('mock_label', 'build', 'chromeos_image',
                                'autotest_dir', 'debug_dir', '/tmp', 'lumpy',
                                'remote', 'image_args', 'cache_dir', 'average',
                                'gcc', False, None)
def testCreateFromRun(self):
    """CreateFromRun should parse OUTPUT into keyvals and the result dirs."""
    result = MockResult.CreateFromRun(logger.GetLogger(), 'average',
                                      self.mock_label, 'remote1', OUTPUT, error,
                                      0, True)
    self.assertEqual(result.keyvals, keyvals)
    self.assertEqual(result.chroot_results_dir,
                     '/tmp/test_that.PO1234567/platform_LibCBench')
    self.assertEqual(result.results_dir,
                     '/tmp/chroot/tmp/test_that.PO1234567/platform_LibCBench')
    self.assertEqual(result.retval, 0)
def setUp(self):
    """Create a fresh Result wired to the shared mocks before each test."""
    self.result = Result(self.mock_logger, self.mock_label, 'average',
                         self.mock_cmd_exec)
@mock.patch.object(os.path, 'isdir')
@mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
@mock.patch.object(command_executer.CommandExecuter, 'CopyFiles')
def test_copy_files_to(self, mock_copyfiles, mock_runcmd, mock_isdir):
    """CopyFilesTo: appended '.0' suffix, mkdir fallback, and failure path."""
    files = ['src_file_1', 'src_file_2', 'src_file_3']
    dest_dir = '/tmp/test'
    self.mock_cmd_exec.RunCommand = mock_runcmd
    self.mock_cmd_exec.CopyFiles = mock_copyfiles
    mock_copyfiles.return_value = 0

    # test 1. dest_dir exists; CopyFiles returns 0.
    mock_isdir.return_value = True
    self.result.CopyFilesTo(dest_dir, files)
    self.assertEqual(mock_runcmd.call_count, 0)
    self.assertEqual(mock_copyfiles.call_count, 3)
    first_args = mock_copyfiles.call_args_list[0][0]
    second_args = mock_copyfiles.call_args_list[1][0]
    third_args = mock_copyfiles.call_args_list[2][0]
    self.assertEqual(first_args, ('src_file_1', '/tmp/test/src_file_1.0'))
    self.assertEqual(second_args, ('src_file_2', '/tmp/test/src_file_2.0'))
    self.assertEqual(third_args, ('src_file_3', '/tmp/test/src_file_3.0'))

    mock_runcmd.reset_mock()
    mock_copyfiles.reset_mock()

    # test 2. dest_dir does not exist; CopyFiles returns 0.
    mock_isdir.return_value = False
    self.result.CopyFilesTo(dest_dir, files)
    # One mkdir per file, all identical.
    self.assertEqual(mock_runcmd.call_count, 3)
    self.assertEqual(mock_copyfiles.call_count, 3)
    self.assertEqual(mock_runcmd.call_args_list[0],
                     mock_runcmd.call_args_list[1])
    self.assertEqual(mock_runcmd.call_args_list[0],
                     mock_runcmd.call_args_list[2])
    self.assertEqual(mock_runcmd.call_args_list[0][0], ('mkdir -p /tmp/test',))

    # test 3. CopyFiles returns 1 (fails).
    mock_copyfiles.return_value = 1
    self.assertRaises(Exception, self.result.CopyFilesTo, dest_dir, files)
@mock.patch.object(Result, 'CopyFilesTo')
def test_copy_results_to(self, mockCopyFilesTo):
    """CopyResultsTo should forward perf data and report files to CopyFilesTo."""
    perf_data_files = [
        '/tmp/perf.data.0', '/tmp/perf.data.1', '/tmp/perf.data.2'
    ]
    perf_report_files = [
        '/tmp/perf.report.0', '/tmp/perf.report.1', '/tmp/perf.report.2'
    ]
    self.result.perf_data_files = perf_data_files
    self.result.perf_report_files = perf_report_files
    self.result.CopyFilesTo = mockCopyFilesTo
    self.result.CopyResultsTo('/tmp/results/')
    # One call for the data files, one for the report files.
    self.assertEqual(mockCopyFilesTo.call_count, 2)
    self.assertEqual(len(mockCopyFilesTo.call_args_list), 2)
    self.assertEqual(mockCopyFilesTo.call_args_list[0][0],
                     ('/tmp/results/', perf_data_files))
    self.assertEqual(mockCopyFilesTo.call_args_list[1][0],
                     ('/tmp/results/', perf_report_files))
def test_get_new_keyvals(self):
    """GetNewKeyvals should parse the checked-in keyval file into values + units."""
    kv_dict = {}

    def FakeGetDataMeasurementsFiles():
        # Point the parser at the fixture file next to this test.
        filename = os.path.join(os.getcwd(), 'unittest_keyval_file.txt')
        return [filename]

    self.result.GetDataMeasurementsFiles = FakeGetDataMeasurementsFiles
    kv_dict2, udict = self.result.GetNewKeyvals(kv_dict)
    self.assertEqual(
        kv_dict2, {
            u'Box2D__Box2D': 4775,
            u'Mandreel__Mandreel': 6620,
            u'Gameboy__Gameboy': 9901,
            u'Crypto__Crypto': 8737,
            u'telemetry_page_measurement_results__num_errored': 0,
            u'telemetry_page_measurement_results__num_failed': 0,
            u'PdfJS__PdfJS': 6455,
            u'Total__Score': 7918,
            u'EarleyBoyer__EarleyBoyer': 14340,
            u'MandreelLatency__MandreelLatency': 5188,
            u'CodeLoad__CodeLoad': 6271,
            u'DeltaBlue__DeltaBlue': 14401,
            u'Typescript__Typescript': 9815,
            u'SplayLatency__SplayLatency': 7653,
            u'zlib__zlib': 16094,
            u'Richards__Richards': 10358,
            u'RegExp__RegExp': 1765,
            u'NavierStokes__NavierStokes': 9815,
            u'Splay__Splay': 4425,
            u'RayTrace__RayTrace': 16600
        })
    self.assertEqual(
        udict, {
            u'Box2D__Box2D': u'score',
            u'Mandreel__Mandreel': u'score',
            u'Gameboy__Gameboy': u'score',
            u'Crypto__Crypto': u'score',
            u'telemetry_page_measurement_results__num_errored': u'count',
            u'telemetry_page_measurement_results__num_failed': u'count',
            u'PdfJS__PdfJS': u'score',
            u'Total__Score': u'score',
            u'EarleyBoyer__EarleyBoyer': u'score',
            u'MandreelLatency__MandreelLatency': u'score',
            u'CodeLoad__CodeLoad': u'score',
            u'DeltaBlue__DeltaBlue': u'score',
            u'Typescript__Typescript': u'score',
            u'SplayLatency__SplayLatency': u'score',
            u'zlib__zlib': u'score',
            u'Richards__Richards': u'score',
            u'RegExp__RegExp': u'score',
            u'NavierStokes__NavierStokes': u'score',
            u'Splay__Splay': u'score',
            u'RayTrace__RayTrace': u'score'
        })
def test_append_telemetry_units(self):
    """AppendTelemetryUnits should pair each value with its unit as [value, unit]."""
    kv_dict = {
        u'Box2D__Box2D': 4775,
        u'Mandreel__Mandreel': 6620,
        u'Gameboy__Gameboy': 9901,
        u'Crypto__Crypto': 8737,
        u'PdfJS__PdfJS': 6455,
        u'Total__Score': 7918,
        u'EarleyBoyer__EarleyBoyer': 14340,
        u'MandreelLatency__MandreelLatency': 5188,
        u'CodeLoad__CodeLoad': 6271,
        u'DeltaBlue__DeltaBlue': 14401,
        u'Typescript__Typescript': 9815,
        u'SplayLatency__SplayLatency': 7653,
        u'zlib__zlib': 16094,
        u'Richards__Richards': 10358,
        u'RegExp__RegExp': 1765,
        u'NavierStokes__NavierStokes': 9815,
        u'Splay__Splay': 4425,
        u'RayTrace__RayTrace': 16600
    }
    units_dict = {
        u'Box2D__Box2D': u'score',
        u'Mandreel__Mandreel': u'score',
        u'Gameboy__Gameboy': u'score',
        u'Crypto__Crypto': u'score',
        u'PdfJS__PdfJS': u'score',
        u'Total__Score': u'score',
        u'EarleyBoyer__EarleyBoyer': u'score',
        u'MandreelLatency__MandreelLatency': u'score',
        u'CodeLoad__CodeLoad': u'score',
        u'DeltaBlue__DeltaBlue': u'score',
        u'Typescript__Typescript': u'score',
        u'SplayLatency__SplayLatency': u'score',
        u'zlib__zlib': u'score',
        u'Richards__Richards': u'score',
        u'RegExp__RegExp': u'score',
        u'NavierStokes__NavierStokes': u'score',
        u'Splay__Splay': u'score',
        u'RayTrace__RayTrace': u'score'
    }

    results_dict = self.result.AppendTelemetryUnits(kv_dict, units_dict)
    self.assertEqual(
        results_dict, {
            u'Box2D__Box2D': [4775, u'score'],
            u'Splay__Splay': [4425, u'score'],
            u'Gameboy__Gameboy': [9901, u'score'],
            u'Crypto__Crypto': [8737, u'score'],
            u'PdfJS__PdfJS': [6455, u'score'],
            u'Total__Score': [7918, u'score'],
            u'EarleyBoyer__EarleyBoyer': [14340, u'score'],
            u'MandreelLatency__MandreelLatency': [5188, u'score'],
            u'DeltaBlue__DeltaBlue': [14401, u'score'],
            u'SplayLatency__SplayLatency': [7653, u'score'],
            u'Mandreel__Mandreel': [6620, u'score'],
            u'Richards__Richards': [10358, u'score'],
            u'zlib__zlib': [16094, u'score'],
            u'CodeLoad__CodeLoad': [6271, u'score'],
            u'Typescript__Typescript': [9815, u'score'],
            u'RegExp__RegExp': [1765, u'score'],
            u'RayTrace__RayTrace': [16600, u'score'],
            u'NavierStokes__NavierStokes': [9815, u'score']
        })
@mock.patch.object(misc, 'GetInsideChrootPath')
@mock.patch.object(tempfile, 'mkdtemp')
@mock.patch.object(command_executer.CommandExecuter, 'RunCommand')
@mock.patch.object(command_executer.CommandExecuter,
                   'ChrootRunCommandWOutput')
def test_get_keyvals(self, mock_chrootruncmd, mock_runcmd, mock_mkdtemp,
                     mock_getpath):
    """GetKeyvals: temp-dir handling and unit appending for Telemetry suites."""
    self.kv_dict = {}
    self.callGetNewKeyvals = False

    def reset():
        # Clear the bookkeeping state and all mocks between sub-tests.
        self.kv_dict = {}
        self.callGetNewKeyvals = False
        mock_chrootruncmd.reset_mock()
        mock_runcmd.reset_mock()
        mock_mkdtemp.reset_mock()
        mock_getpath.reset_mock()

    def FakeGetNewKeyvals(kv_dict):
        # Record the parsed report dict and return canned keyvals/units.
        self.kv_dict = kv_dict
        self.callGetNewKeyvals = True
        return_kvdict = {'first_time': 680, 'Total': 10}
        return_udict = {'first_time': 'ms', 'Total': 'score'}
        return return_kvdict, return_udict

    mock_mkdtemp.return_value = TMP_DIR1
    mock_chrootruncmd.return_value = [
        '', ('%s,PASS\n%s/telemetry_Crosperf,PASS\n') % (TMP_DIR1, TMP_DIR1), ''
    ]
    mock_getpath.return_value = TMP_DIR1
    self.result.ce.ChrootRunCommandWOutput = mock_chrootruncmd
    self.result.ce.RunCommand = mock_runcmd
    self.result.GetNewKeyvals = FakeGetNewKeyvals
    self.result.suite = 'telemetry_Crosperf'
    self.result.results_dir = '/tmp/test_that_resultsNmq'

    # Test 1. no self.temp_dir.
    res = self.result.GetKeyvals()
    self.assertTrue(self.callGetNewKeyvals)
    self.assertEqual(self.kv_dict, {'': 'PASS', 'telemetry_Crosperf': 'PASS'})
    self.assertEqual(mock_runcmd.call_count, 1)
    self.assertEqual(mock_runcmd.call_args_list[0][0],
                     ('cp -r /tmp/test_that_resultsNmq/* %s' % TMP_DIR1,))
    self.assertEqual(mock_chrootruncmd.call_count, 1)
    self.assertEqual(
        mock_chrootruncmd.call_args_list[0][0],
        ('/tmp', ('./generate_test_report --no-color --csv %s') % TMP_DIR1))
    self.assertEqual(mock_getpath.call_count, 1)
    self.assertEqual(mock_mkdtemp.call_count, 1)
    self.assertEqual(res, {'Total': [10, 'score'], 'first_time': [680, 'ms']})

    # Test 2. self.temp_dir
    reset()
    mock_chrootruncmd.return_value = [
        '', ('/tmp/tmpJCajRG,PASS\n/tmp/tmpJCajRG/'
             'telemetry_Crosperf,PASS\n'), ''
    ]
    mock_getpath.return_value = '/tmp/tmpJCajRG'
    self.result.temp_dir = '/tmp/tmpJCajRG'
    res = self.result.GetKeyvals()
    # With an existing temp dir there is no copy and no mkdtemp.
    self.assertEqual(mock_runcmd.call_count, 0)
    self.assertEqual(mock_mkdtemp.call_count, 0)
    self.assertEqual(mock_chrootruncmd.call_count, 1)
    self.assertTrue(self.callGetNewKeyvals)
    self.assertEqual(self.kv_dict, {'': 'PASS', 'telemetry_Crosperf': 'PASS'})
    self.assertEqual(res, {'Total': [10, 'score'], 'first_time': [680, 'ms']})

    # Test 3. suite != telemetry_Crosperf. Normally this would be for
    # running non-Telemetry autotests, such as BootPerfServer. In this test
    # case, the keyvals we have set up were returned from a Telemetry test run;
    # so this pass is basically testing that we don't append the units to the
    # test results (which we do for Telemetry autotest runs).
    reset()
    self.result.suite = ''
    res = self.result.GetKeyvals()
    self.assertEqual(res, {'Total': 10, 'first_time': 680})
@mock.patch.object(misc, 'GetInsideChrootPath')
@mock.patch.object(command_executer.CommandExecuter,
                   'ChrootRunCommandWOutput')
def test_get_samples(self, mock_chrootruncmd, mock_getpath):
    """GetSamples should parse the sample count out of perf report output."""
    fake_file = '/usr/chromeos/chroot/tmp/results/fake_file'
    self.result.perf_data_files = ['/tmp/results/perf.data']
    self.result.board = 'samus'
    mock_getpath.return_value = fake_file
    self.result.ce.ChrootRunCommandWOutput = mock_chrootruncmd
    # Second column of the report line is the sample count.
    mock_chrootruncmd.return_value = ['', '45.42%        237210  chrome ', '']
    samples = self.result.GetSamples()
    self.assertEqual(samples, [237210, u'samples'])
def test_get_results_dir(self):
    """GetResultsDir raises on empty output and parses the dir otherwise."""
    self.result.out = ''
    self.assertRaises(Exception, self.result.GetResultsDir)

    self.result.out = OUTPUT
    resdir = self.result.GetResultsDir()
    self.assertEqual(resdir, '/tmp/test_that.PO1234567/platform_LibCBench')
@mock.patch.object(command_executer.CommandExecuter, 'RunCommandGeneric')
def test_find_files_in_results_dir(self, mock_runcmd):
    """FindFilesInResultsDir: empty dir, successful find, and find failure."""
    self.result.results_dir = None
    res = self.result.FindFilesInResultsDir('-name perf.data')
    self.assertEqual(res, '')

    self.result.ce.RunCommand = mock_runcmd
    self.result.results_dir = '/tmp/test_results'
    mock_runcmd.return_value = [0, '/tmp/test_results/perf.data', '']
    res = self.result.FindFilesInResultsDir('-name perf.data')
    self.assertEqual(mock_runcmd.call_count, 1)
    self.assertEqual(mock_runcmd.call_args_list[0][0],
                     ('find /tmp/test_results -name perf.data',))
    self.assertEqual(res, '/tmp/test_results/perf.data')

    mock_runcmd.reset_mock()
    # Non-zero find status must surface as an exception.
    mock_runcmd.return_value = [1, '', '']
    self.assertRaises(Exception, self.result.FindFilesInResultsDir,
                      '-name perf.data')
@mock.patch.object(Result, 'FindFilesInResultsDir')
def test_get_perf_data_files(self, mock_findfiles):
    """GetPerfDataFiles should split the find output into a list of paths."""
    self.args = None

    mock_findfiles.return_value = 'line1\nline1\n'
    self.result.FindFilesInResultsDir = mock_findfiles
    res = self.result.GetPerfDataFiles()
    self.assertEqual(res, ['line1', 'line1'])
    self.assertEqual(mock_findfiles.call_args_list[0][0], ('-name perf.data',))
def test_get_perf_report_files(self):
    """GetPerfReportFiles should search for perf.data.report files."""
    self.args = None

    def FakeFindFiles(find_args):
        # Capture the find arguments for verification below.
        self.args = find_args
        return 'line1\nline1\n'

    self.result.FindFilesInResultsDir = FakeFindFiles
    res = self.result.GetPerfReportFiles()
    self.assertEqual(res, ['line1', 'line1'])
    self.assertEqual(self.args, '-name perf.data.report')
def test_get_data_measurement_files(self):
    """GetDataMeasurementsFiles should search for perf_measurements files."""
    self.args = None

    def FakeFindFiles(find_args):
        # Capture the find arguments for verification below.
        self.args = find_args
        return 'line1\nline1\n'

    self.result.FindFilesInResultsDir = FakeFindFiles
    res = self.result.GetDataMeasurementsFiles()
    self.assertEqual(res, ['line1', 'line1'])
    self.assertEqual(self.args, '-name perf_measurements')
@mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
def test_get_turbostat_file_finds_single_log(self, mock_runcmd):
    """Expected behavior when a single log file found."""
    self.result.results_dir = '/tmp/test_results'
    self.result.ce.RunCommandWOutput = mock_runcmd
    # find returns exactly one path; it is passed through unchanged.
    mock_runcmd.return_value = (0, 'some/long/path/turbostat.log', '')
    found_single_log = self.result.GetTurbostatFile()
    self.assertEqual(found_single_log, 'some/long/path/turbostat.log')
@mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
def test_get_turbostat_file_finds_multiple_logs(self, mock_runcmd):
    """Error case when multiple files found."""
    self.result.results_dir = '/tmp/test_results'
    self.result.ce.RunCommandWOutput = mock_runcmd
    # With multiple matches, only the first path is returned.
    mock_runcmd.return_value = (0,
                                'some/long/path/turbostat.log\nturbostat.log',
                                '')
    found_first_logs = self.result.GetTurbostatFile()
    self.assertEqual(found_first_logs, 'some/long/path/turbostat.log')
@mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
def test_get_turbostat_file_finds_no_logs(self, mock_runcmd):
    """Error case when no log file found."""
    self.result.results_dir = '/tmp/test_results'
    self.result.ce.RunCommandWOutput = mock_runcmd
    # An empty find result maps to an empty string, not an error.
    mock_runcmd.return_value = (0, '', '')
    found_no_logs = self.result.GetTurbostatFile()
    self.assertEqual(found_no_logs, '')
@mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
def test_get_turbostat_file_with_failing_find(self, mock_runcmd):
    """Error case when file search returns an error."""
    self.result.results_dir = '/tmp/test_results'
    # A failing find command must raise rather than return a path.
    mock_runcmd.return_value = (-1, '', 'error')
    with self.assertRaises(RuntimeError):
        self.result.GetTurbostatFile()
@mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
def test_get_top_file_finds_single_log(self, mock_runcmd):
    """Expected behavior when a single top log file found."""
    self.result.results_dir = '/tmp/test_results'
    self.result.ce.RunCommandWOutput = mock_runcmd
    # find returns exactly one path; it is passed through unchanged.
    mock_runcmd.return_value = (0, 'some/long/path/top.log', '')
    found_single_log = self.result.GetTopFile()
    self.assertEqual(found_single_log, 'some/long/path/top.log')
@mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
def test_get_top_file_finds_multiple_logs(self, mock_runcmd):
    """The case when multiple top files found."""
    self.result.results_dir = '/tmp/test_results'
    self.result.ce.RunCommandWOutput = mock_runcmd
    # With multiple matches, only the first path is returned.
    mock_runcmd.return_value = (0, 'some/long/path/top.log\ntop.log', '')
    found_first_logs = self.result.GetTopFile()
    self.assertEqual(found_first_logs, 'some/long/path/top.log')
@mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
def test_get_top_file_finds_no_logs(self, mock_runcmd):
    """Error case when no log file found."""
    self.result.results_dir = '/tmp/test_results'
    self.result.ce.RunCommandWOutput = mock_runcmd
    # An empty find result maps to an empty string, not an error.
    mock_runcmd.return_value = (0, '', '')
    found_no_logs = self.result.GetTopFile()
    self.assertEqual(found_no_logs, '')
@mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
def test_get_cpustats_file_finds_single_log(self, mock_runcmd):
    """Expected behavior when a single log file found."""
    self.result.results_dir = '/tmp/test_results'
    self.result.ce.RunCommandWOutput = mock_runcmd
    # find returns exactly one path; it is passed through unchanged.
    mock_runcmd.return_value = (0, 'some/long/path/cpustats.log', '')
    found_single_log = self.result.GetCpustatsFile()
    self.assertEqual(found_single_log, 'some/long/path/cpustats.log')
@mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
def test_get_cpustats_file_finds_multiple_logs(self, mock_runcmd):
    """The case when multiple files found."""
    self.result.results_dir = '/tmp/test_results'
    self.result.ce.RunCommandWOutput = mock_runcmd
    # With multiple matches, only the first path is returned.
    mock_runcmd.return_value = (0, 'some/long/path/cpustats.log\ncpustats.log',
                                '')
    found_first_logs = self.result.GetCpustatsFile()
    self.assertEqual(found_first_logs, 'some/long/path/cpustats.log')
@mock.patch.object(command_executer.CommandExecuter, 'RunCommandWOutput')
def test_get_cpustats_file_finds_no_logs(self, mock_runcmd):
    """Error case when no log file found."""
    self.result.results_dir = '/tmp/test_results'
    self.result.ce.RunCommandWOutput = mock_runcmd
    # An empty find result maps to an empty string, not an error.
    mock_runcmd.return_value = (0, '', '')
    found_no_logs = self.result.GetCpustatsFile()
    self.assertEqual(found_no_logs, '')
def test_process_turbostat_results_with_valid_data(self):
    """Normal case when log exists and contains valid data."""
    self.result.turbostat_log_file = '/tmp/somelogfile.log'
    # Intercept open() so no real file is needed.
    with mock.patch('builtins.open',
                    mock.mock_open(read_data=TURBOSTAT_LOG_OUTPUT)) as mo:
        cpustats = self.result.ProcessTurbostatResults()
        # Check that the log got opened and data were read/parsed.
        calls = [mock.call('/tmp/somelogfile.log')]
        mo.assert_has_calls(calls)
        self.assertEqual(cpustats, TURBOSTAT_DATA)
def test_process_turbostat_results_from_empty_file(self):
"""Error case when log exists but file is empty."""
self.result.turbostat_log_file = '/tmp/emptylogfile.log'
with mock.patch('builtins.open', mock.mock_open(read_data='')) as mo:
| |
import collections
import logging
import uuid
from contextlib import contextmanager
import requests
from kinto_http import utils
from kinto_http.session import create_session, Session
from kinto_http.batch import BatchSession
from kinto_http.exceptions import (
BucketNotFound,
CollectionNotFound,
KintoException,
KintoBatchException,
)
from kinto_http.patch_type import PatchType, BasicPatch
logger = logging.getLogger("kinto_http")

# Public API of the package.
__all__ = (
    "BearerTokenAuth",
    "Endpoints",
    "Session",
    "Client",
    "create_session",
    "BucketNotFound",
    "CollectionNotFound",
    "KintoException",
    "KintoBatchException",
)

# Permission names applicable to each kind of Kinto object.
OBJECTS_PERMISSIONS = {
    "bucket": ["group:create", "collection:create", "write", "read"],
    "group": ["write", "read"],
    "collection": ["write", "read", "record:create"],
    "record": ["read", "write"],
}

# Name of the id field in object payloads.
ID_FIELD = "id"
# Header making a creation PUT fail (HTTP 412) if the object already exists.
DO_NOT_OVERWRITE = {"If-None-Match": "*"}
class Endpoints(object):
    """Expands named URL templates into concrete Kinto endpoint paths."""

    endpoints = {
        "root": "{root}/",
        "batch": "{root}/batch",
        "buckets": "{root}/buckets",
        "bucket": "{root}/buckets/{bucket}",
        "history": "{root}/buckets/{bucket}/history",
        "groups": "{root}/buckets/{bucket}/groups",
        "group": "{root}/buckets/{bucket}/groups/{group}",
        "collections": "{root}/buckets/{bucket}/collections",
        "collection": "{root}/buckets/{bucket}/collections/{collection}",
        "records": "{root}/buckets/{bucket}/collections/{collection}/records",  # NOQA
        "record": "{root}/buckets/{bucket}/collections/{collection}/records/{id}",  # NOQA
    }

    def __init__(self, root=""):
        self._root = root

    def get(self, endpoint, **kwargs):
        """Return the URL of *endpoint*, slugifying the given parameters."""
        # Drop nullable values; slugify the remaining ones.
        cleaned = {key: utils.slugify(value)
                   for key, value in kwargs.items() if value}
        try:
            return self.endpoints[endpoint].format(root=self._root, **cleaned)
        except KeyError as e:
            msg = "Cannot get {endpoint} endpoint, {field} is missing"
            raise KintoException(msg.format(endpoint=endpoint, field=",".join(e.args)))
class BearerTokenAuth(requests.auth.AuthBase):
    """requests auth plugin setting an ``Authorization: <type> <token>`` header."""

    def __init__(self, token, type=None):
        self.token = token
        # Default to the standard "Bearer" scheme when no type is given.
        self.type = type if type else "Bearer"

    def __call__(self, r):
        r.headers["Authorization"] = "{} {}".format(self.type, self.token)
        return r
class Client(object):
def __init__(
self,
*,
server_url=None,
session=None,
auth=None,
bucket="default",
collection=None,
retry=0,
retry_after=None,
timeout=None,
ignore_batch_4xx=False,
headers=None,
):
self.endpoints = Endpoints()
session_kwargs = dict(
server_url=server_url,
auth=auth,
session=session,
retry=retry,
retry_after=retry_after,
timeout=timeout,
headers=headers,
)
self.session = create_session(**session_kwargs)
self._bucket_name = bucket
self._collection_name = collection
self._server_settings = None
self._records_timestamp = {}
self._ignore_batch_4xx = ignore_batch_4xx
def clone(self, **kwargs):
if "server_url" in kwargs or "auth" in kwargs:
kwargs.setdefault("server_url", self.session.server_url)
kwargs.setdefault("auth", self.session.auth)
else:
kwargs.setdefault("session", self.session)
kwargs.setdefault("bucket", self._bucket_name)
kwargs.setdefault("collection", self._collection_name)
kwargs.setdefault("retry", self.session.nb_retry)
kwargs.setdefault("retry_after", self.session.retry_after)
return Client(**kwargs)
@contextmanager
def batch(self, **kwargs):
if self._server_settings is None:
resp, _ = self.session.request("GET", self.get_endpoint("root"))
self._server_settings = resp["settings"]
batch_max_requests = self._server_settings["batch_max_requests"]
batch_session = BatchSession(
self, batch_max_requests=batch_max_requests, ignore_4xx_errors=self._ignore_batch_4xx
)
batch_client = self.clone(session=batch_session, **kwargs)
# Set a reference for reading results from the context.
batch_client.results = batch_session.results
yield batch_client
batch_session.send()
batch_session.reset()
def get_endpoint(self, name, *, bucket=None, group=None, collection=None, id=None):
"""Return the endpoint with named parameters."""
kwargs = {
"bucket": bucket or self._bucket_name,
"collection": collection or self._collection_name,
"group": group,
"id": id,
}
return self.endpoints.get(name, **kwargs)
def _paginated(self, endpoint, records=None, *, if_none_match=None, pages=None, **kwargs):
if records is None:
records = collections.OrderedDict()
headers = {}
if if_none_match is not None:
headers["If-None-Match"] = utils.quote(if_none_match)
if pages is None:
pages = 1 if "_limit" in kwargs else float("inf")
record_resp, headers = self.session.request(
"get", endpoint, headers=headers, params=kwargs
)
# Save the current records collection timestamp
etag = headers.get("ETag", "").strip('"')
self._records_timestamp[endpoint] = etag
if record_resp:
records_tuples = [(r["id"], r) for r in record_resp["data"]]
records.update(collections.OrderedDict(records_tuples))
if pages > 1 and "next-page" in map(str.lower, headers.keys()):
# Paginated wants a relative URL, but the returned one is
# absolute.
next_page = headers["Next-Page"]
return self._paginated(
next_page, records, if_none_match=if_none_match, pages=pages - 1
)
return list(records.values())
def _get_cache_headers(self, safe, data=None, if_match=None):
has_data = data is not None and data.get("last_modified")
if if_match is None and has_data:
if_match = data["last_modified"]
if safe and if_match is not None:
return {"If-Match": utils.quote(if_match)}
# else return None
def _extract_original_info(self, original, id, if_match):
"""Utility method to extract ID and last_modified.
Many update methods require the ID of a resource (to generate
a URL) and the last_modified to generate safe cache headers
(If-Match). As a convenience, we allow users to pass the
original record retrieved from a get_* method, which also
contains those values. This utility method lets methods
support both explicit arguments for ``id`` and ``if_match`` as
well as implicitly reading them from an original resource.
"""
if original:
id = id or original.get("id")
if_match = if_match or original.get("last_modified")
return (id, if_match)
def _patch_method(
self, endpoint, patch, safe=True, if_match=None, data=None, permissions=None
):
"""Utility method for implementing PATCH methods."""
if not patch:
# Backwards compatibility: the changes argument was
# introduced in 9.1.0, and covers both ``data`` and
# ``permissions`` arguments. Support the old style of
# passing dicts by casting them into a BasicPatch.
patch = BasicPatch(data=data, permissions=permissions)
if not isinstance(patch, PatchType):
raise TypeError("couldn't understand patch body {}".format(patch))
body = patch.body
content_type = patch.content_type
headers = self._get_cache_headers(safe, if_match=if_match) or {}
headers["Content-Type"] = content_type
resp, _ = self.session.request("patch", endpoint, payload=body, headers=headers)
return resp
def _create_if_not_exists(self, resource, **kwargs):
try:
create_method = getattr(self, "create_%s" % resource)
return create_method(**kwargs)
except KintoException as e:
if not hasattr(e, "response") or e.response.status_code != 412:
raise e
# The exception contains the existing record in details.existing
# but it's not enough as we also need to return the permissions.
get_kwargs = {"id": kwargs["id"]}
if resource in ("group", "collection", "record"):
get_kwargs["bucket"] = kwargs["bucket"]
if resource == "record":
get_kwargs["collection"] = kwargs["collection"]
_id = kwargs.get("id") or kwargs["data"]["id"]
get_kwargs["id"] = _id
get_method = getattr(self, "get_%s" % resource)
return get_method(**get_kwargs)
def _delete_if_exists(self, resource, **kwargs):
try:
delete_method = getattr(self, "delete_%s" % resource)
return delete_method(**kwargs)
except KintoException as e:
# Should not raise in case of a 404.
should_raise = not (
hasattr(e, "response") and e.response is not None and e.response.status_code == 404
)
# Should not raise in case of a 403 on a bucket.
if should_raise and resource.startswith("bucket"):
should_raise = not (
hasattr(e, "response")
and e.response is not None
and e.response.status_code == 403
)
if should_raise:
raise e
# Server Info
def server_info(self):
endpoint = self.get_endpoint("root")
resp, _ = self.session.request("get", endpoint)
return resp
# Buckets
def create_bucket(
self, *, id=None, data=None, permissions=None, safe=True, if_not_exists=False
):
if id is None and data:
id = data.get("id", None)
if if_not_exists:
return self._create_if_not_exists(
"bucket", id=id, data=data, permissions=permissions, safe=safe
)
headers = DO_NOT_OVERWRITE if safe else None
endpoint = self.get_endpoint("bucket", bucket=id)
logger.info("Create bucket %r" % id or self._bucket_name)
resp, _ = self.session.request(
"put", endpoint, data=data, permissions=permissions, headers=headers
)
return resp
def update_bucket(self, *, id=None, data=None, permissions=None, safe=True, if_match=None):
if id is None and data:
id = data.get("id", None)
endpoint = self.get_endpoint("bucket", bucket=id)
headers = self._get_cache_headers(safe, data, if_match)
logger.info("Update bucket %r" % id or self._bucket_name)
resp, _ = self.session.request(
"put", endpoint, data=data, permissions=permissions, headers=headers
)
return resp
def patch_bucket(
self,
*,
id=None,
changes=None,
data=None,
original=None,
permissions=None,
safe=True,
if_match=None,
):
"""Issue a PATCH request on a bucket.
:param changes: the patch to apply
:type changes: PatchType
:param original: the original bucket, from which the ID and
last_modified can be taken
:type original: dict
"""
# Backwards compatibility: a dict is both a BasicPatch and a
# possible bucket (this was the behavior in 9.0.1 and
# earlier). In other words, we consider the data as a
# possible bucket, even though PATCH data probably shouldn't
# also contain an ID or a last_modified, as these shouldn't be
# modified by a user.
original = original or data
(id, if_match) = self._extract_original_info(original, id, if_match)
endpoint = self.get_endpoint("bucket", bucket=id)
logger.info("Patch bucket %r" % (id or self._bucket_name,))
return self._patch_method(
endpoint, changes, data=data, permissions=permissions, safe=safe, if_match=if_match
)
def get_buckets(self, **kwargs):
endpoint = self.get_endpoint("buckets")
return self._paginated(endpoint, **kwargs)
def get_bucket(self, *, id=None, **kwargs):
endpoint = self.get_endpoint("bucket", bucket=id)
logger.info("Get bucket %r" % id or self._bucket_name)
try:
resp, _ = self.session.request("get", endpoint, params=kwargs)
except KintoException as e:
error_resp_code = e.response.status_code
if error_resp_code == 401:
msg = (
"Unauthorized. Please authenticate or make sure the bucket "
"can be read anonymously."
)
e = KintoException(msg, e)
raise e
raise BucketNotFound(id or self._bucket_name, e)
return resp
def delete_bucket(self, *, id=None, safe=True, if_match=None, if_exists=False):
if if_exists:
return self._delete_if_exists("bucket", id=id, safe=safe, if_match=if_match)
endpoint = self.get_endpoint("bucket", bucket=id)
headers = self._get_cache_headers(safe, if_match=if_match)
logger.info("Delete bucket %r" % id or self._bucket_name)
resp, _ = self.session.request("delete", endpoint, headers=headers)
return resp["data"]
def delete_buckets(self, *, safe=True, if_match=None):
endpoint = self.get_endpoint("buckets")
headers = self._get_cache_headers(safe, if_match=if_match)
logger.info("Delete buckets")
resp, _ = self.session.request("delete", endpoint, headers=headers)
return resp["data"]
# Groups
def get_groups(self, *, bucket=None, **kwargs):
endpoint = self.get_endpoint("groups", bucket=bucket)
return self._paginated(endpoint, **kwargs)
def create_group(
self, *, id=None, bucket=None, data=None, permissions=None, safe=True, if_not_exists=False
):
if id is None and data:
id = data.get("id", None)
if id is None:
raise KeyError("Please provide a group id")
if if_not_exists:
return self._create_if_not_exists(
"group", id=id, bucket=bucket, data=data, permissions=permissions, safe=safe
)
headers = DO_NOT_OVERWRITE if safe else None
endpoint = self.get_endpoint("group", bucket=bucket, group=id)
logger.info("Create | |
0", so the sign flag would be set to 1 if the value isn't
# accessible.
# We inline the Shadow::IsAccessible function for performance reasons.
# This function does the following:
# - Checks if this byte is accessible and jumps to the error path if it's
# not.
# - Removes the memory location from the top of the stack.
# Format args: probe_index -- used to make the local labels unique per probe.
_SLOW_PATH = """\
js report_failure_{probe_index}
mov dh, BYTE PTR[esp]
and dh, 7
cmp dh, dl
jae report_failure_{probe_index}
add esp, 4"""
# The error path.
#
# It expects to have the previous value of EDX at [ESP + 4] and the address
# of the faulty instruction at [ESP].
# This macro takes care of saving and restoring the flags.
_ERROR_PATH ="""\
; Restore original value of EDX, and put memory location on stack.
xchg edx, DWORD PTR[esp + 4]
; Create an Asan registers context on the stack.
pushfd
pushad
; Fix the original value of ESP in the Asan registers context.
; Removing 12 bytes (e.g. EFLAGS / EIP / Original EDX).
add DWORD PTR[esp + 12], 12
; Push ARG4: the address of Asan context on stack.
push esp
; Push ARG3: the access size.
push {access_size}
; Push ARG2: the access type.
push {access_mode_value}
; Push ARG1: the memory location.
push DWORD PTR[esp + 52]
call asan_report_bad_memory_access
; Remove 4 x ARG on stack.
add esp, 16
; Restore original registers.
popad
popfd
; Return and remove memory location on stack.
ret 4"""
# Collects the above macros and bundles them up in a dictionary so they can be
# easily expanded by the string format functions.
# The keys are the field names that MacroAssembler.get_value resolves when
# they appear in a template (e.g. "{AsanSaveEflags}").
_MACROS = {
    "AsanSaveEflags": _SAVE_EFLAGS,
    "AsanRestoreEflags": _RESTORE_EFLAGS,
    "AsanFastPath": _FAST_PATH,
    "AsanSlowPath": _SLOW_PATH,
    "AsanErrorPath": _ERROR_PATH,
}
# Generates the Asan check access functions.
#
# The name of the generated method will be
# asan_check_(@p access_size)_byte_(@p access_mode_str)().
#
# Args:
# access_size: The size of the access (in byte).
# access_mode_str: The string representing the access mode (read_access
# or write_access).
# access_mode_value: The internal value representing this kind of
# access.
# mem_model: The memory-model suffix appended to the generated symbol name.
# probe_index: The index of the probe function. Used to mangle internal labels
# so that they are unique to this probe's implementation.
_CHECK_FUNCTION = """\
; On entry, the address to check is in EDX and the previous contents of
; EDX are on stack. On exit the previous contents of EDX have been restored
; and popped off the stack. This function modifies no other registers,
; in particular it saves and restores EFLAGS.
ALIGN 16
asan_check_{access_size}_byte_{access_mode_str}_{mem_model} PROC \
; Probe #{probe_index}.
{AsanSaveEflags}
{AsanFastPath}
; Restore original EDX.
mov edx, DWORD PTR[esp + 8]
{AsanRestoreEflags}
ret 4
check_access_slow_{probe_index} LABEL NEAR
{AsanSlowPath}
; Restore original EDX.
mov edx, DWORD PTR[esp + 8]
{AsanRestoreEflags}
ret 4
report_failure_{probe_index} LABEL NEAR
; Restore memory location in EDX.
pop edx
{AsanRestoreEflags}
{AsanErrorPath}
asan_check_{access_size}_byte_{access_mode_str}_{mem_model} ENDP
"""

# Declare the check access function public label.
_CHECK_FUNCTION_DECL = """\
PUBLIC asan_check_{access_size}_byte_{access_mode_str}_{mem_model} ; Probe \
#{probe_index}."""
# Generates a variant of the Asan check access functions that don't save
# the flags.
#
# The name of the generated method will be
# asan_check_(@p access_size)_byte_(@p access_mode_str)_no_flags().
#
# Args:
# access_size: The size of the access (in byte).
# access_mode_str: The string representing the access mode (read_access
# or write_access).
# access_mode_value: The internal value representing this kind of access.
# mem_model: The memory-model suffix appended to the generated symbol name.
# probe_index: The index of the probe function. Used to mangle internal labels
# so that they are unique to this probe's implementation.
# Note: Calling this function may alter the EFLAGS register only.
_CHECK_FUNCTION_NO_FLAGS = """\
; On entry, the address to check is in EDX and the previous contents of
; EDX are on stack. On exit the previous contents of EDX have been restored
; and popped off the stack. This function may modify EFLAGS, but preserves
; all other registers.
ALIGN 16
asan_check_{access_size}_byte_{access_mode_str}_no_flags_{mem_model} PROC \
; Probe #{probe_index}.
{AsanFastPath}
; Restore original EDX.
mov edx, DWORD PTR[esp + 4]
ret 4
check_access_slow_{probe_index} LABEL NEAR
{AsanSlowPath}
; Restore original EDX.
mov edx, DWORD PTR[esp + 4]
ret 4
report_failure_{probe_index} LABEL NEAR
; Restore memory location in EDX.
pop edx
{AsanErrorPath}
asan_check_{access_size}_byte_{access_mode_str}_no_flags_{mem_model} ENDP
"""

# Declare the check access function public label.
_CHECK_FUNCTION_NO_FLAGS_DECL = """\
PUBLIC asan_check_{access_size}_byte_{access_mode_str}_no_flags_{mem_model} \
; Probe #{probe_index}."""
# Generates the Asan memory accessor redirector stubs.
#
# The name of the generated method will be
# asan_redirect_(@p access_size)_byte_(@p access_mode_str)(@p suffix)().
#
# Args:
# access_size: The size of the access (in byte).
# access_mode_str: The string representing the access mode (read_access
# or write_access).
# suffix: The suffix - if any - for this function name (may be empty).
_REDIRECT_FUNCTION = """\
asan_redirect_{access_size}_byte_{access_mode_str}{suffix} LABEL PROC
call asan_redirect_tail"""

# Declare the public label.
_REDIRECT_FUNCTION_DECL = """\
PUBLIC asan_redirect_{access_size}_byte_{access_mode_str}{suffix}"""

# Generates the Clang-Asan memory accessor redirector stubs.
#
# The name of the generated method will be
# asan_redirect_(@p access_mode_str)(@p access_size)().
#
# Args:
# access_size: The size of the access (in byte).
# access_mode_str: The string representing the access mode (load or store).
_CLANG_REDIRECT_FUNCTION = """\
asan_redirect_{access_mode_str}{access_size} LABEL PROC
call asan_redirect_tail_clang"""

# Declare the public label.
_CLANG_REDIRECT_FUNCTION_DECL = """\
PUBLIC asan_redirect_{access_mode_str}{access_size}"""
# Generates the Asan check access functions for a string instruction.
#
# The name of the generated method will be
# asan_check_(@p prefix)(@p access_size)_byte_(@p inst)_access().
#
# Args (the colons below were garbled in the original comment; fixed here):
# func: The instruction mnemonic used in the generated symbol name.
# prefix: The prefix of the instruction (repz or nothing).
# counter: The number of times the instruction must be executed (ECX).
# It may be a register or a constant.
# dst_mode: The memory access mode for destination (EDI).
# src_mode: The memory access mode for source (ESI).
# access_size: The size of the access (in byte).
# compare: A flag to enable shortcut execution by comparing memory
# contents.
# probe_index: Used to make the local labels unique per probe.
_CHECK_STRINGS = """\
ALIGN 16
asan_check{prefix}{access_size}_byte_{func}_access PROC ; Probe #{probe_index}.
; Prologue, save context.
pushfd
pushad
; Fix the original value of ESP in the Asan registers context.
; Removing 8 bytes (e.g.EFLAGS / EIP was on stack).
add DWORD PTR[esp + 12], 8
; Setup increment in EBX (depends on direction flag in EFLAGS).
mov ebx, {access_size}
pushfd
pop eax
test eax, 400h
jz skip_neg_direction_{probe_index}
neg ebx
skip_neg_direction_{probe_index} LABEL NEAR
; By standard calling convention, direction flag must be forward.
cld
; Push ARG(context), the Asan registers context.
push esp
; Push ARG(compare), shortcut when memory contents differ.
push {compare}
; Push ARG(increment), increment for EDI/EDI.
push ebx
; Push ARG(access_size), the access size.
push {access_size}
; Push ARG(length), the number of memory accesses.
push {counter}
; Push ARG(src_access_mode), source access type.
push {src_mode}
; Push ARG(src), the source pointer.
push esi
; Push ARG(dst_access_mode), destination access type.
push {dst_mode}
; Push ARG(dst), the destination pointer.
push edi
; Call the generic check strings function.
call asan_check_strings_memory_accesses
add esp, 36
; Epilogue, restore context.
popad
popfd
ret
asan_check{prefix}{access_size}_byte_{func}_access ENDP
"""

# Declare the string checking probe public label.
_CHECK_STRINGS_DECL = """\
PUBLIC asan_check{prefix}{access_size}_byte_{func}_access ; Probe \
#{probe_index}."""
# Generates the Asan string memory accessor redirector stubs.
#
# The name of the generated method will be
# asan_redirect_(@p prefix)(@p access_size)_byte_(@p inst)_access().
#
# Args (the colons below were garbled in the original comment; fixed here):
# func: The instruction mnemonic used in the generated symbol name.
# prefix: The prefix of the instruction (repz or nothing).
# counter: The number of times the instruction must be executed (ECX).
# It may be a register or a constant.
# dst_mode: The memory access mode for destination (EDI).
# src_mode: The memory access mode for source (ESI).
# access_size: The size of the access (in byte).
# compare: A flag to enable shortcut execution by comparing memory
# contents.
_STRING_REDIRECT_FUNCTION = """\
asan_redirect{prefix}{access_size}_byte_{func}_access LABEL PROC
call asan_redirect_tail"""

# Declare the public label.
_STRING_REDIRECT_FUNCTION_DECL = """\
PUBLIC asan_redirect{prefix}{access_size}_byte_{func}_access"""
class MacroAssembler(string.Formatter):
    """A formatter specialization to inject the AsanXXX macros and make
    them easier to use."""

    def parse(self, format_string):
        """Override to trim whitespace on empty trailing line.

        When a macro field sits alone on an indented line, the literal text
        before it ends with a newline plus that indentation; drop the
        indentation so the (already formatted) macro expansion lines up
        naturally.
        """
        for (lit, fld, fmt, conv) in super(MacroAssembler, self).parse(format_string):
            # Strip trailing whitespace from the previous literal to allow
            # natural use of AsanXXX macros.
            m = re.match(r'^(.*\n)( +)$', lit)
            if m:
                # BUG FIX: the original assigned m.group(0) -- the whole
                # match, trailing whitespace included -- which made this
                # branch a no-op. group(1) is the text up to the newline.
                lit = m.group(1)
            yield((lit, fld, fmt, conv))

    def get_value(self, key, args, kwargs):
        """Override to inject macro definitions."""
        if key in _MACROS:
            macro = _MACROS[key].format(*args, **kwargs)
            # Trim leading whitespace to allow natural use of AsanXXX macros.
            macro = macro.lstrip()
            return macro
        return super(MacroAssembler, self).get_value(key, args, kwargs)
# Access sizes for the memory accessors generated.
_ACCESS_SIZES = (1, 2, 4, 8, 10, 16, 32)

# These values must correspond to those defined in the agent::asan::AccessMode
# enum. See syzygy/agent/asan/error_info.h.
_ASAN_READ_ACCESS = 0
_ASAN_WRITE_ACCESS = 1
_ASAN_UNKNOWN_ACCESS = 2

# Access modes for the memory accessors generated.
# Each entry pairs the symbol-name fragment with its numeric AccessMode value.
_ACCESS_MODES = [
    ('read_access', _ASAN_READ_ACCESS),
    ('write_access', _ASAN_WRITE_ACCESS),
]

# Same pairs, but using Clang's load/store naming convention.
_CLANG_ACCESS_MODES = [
    ('load', _ASAN_READ_ACCESS),
    ('store', _ASAN_WRITE_ACCESS),
]
# Memory | |
not None:
self._logger.info(' '.join(['SPECTRAL INDICES COMMAND:', cmd]))
output = ''
try:
output = utilities.execute_cmd(cmd)
finally:
if len(output) > 0:
self._logger.info(output)
def generate_surface_water_extent(self):
"""Generates the Dynamic Surface Water Extent product
"""
options = self._parms['options']
if not options['include_dswe']:
return
cmd = ['surface_water_extent.py',
'--xml', self._xml_filename,
'--verbose']
cmd = ' '.join(cmd)
self._logger.info(' '.join(['SURFACE WATER EXTENT COMMAND:', cmd]))
output = ''
try:
output = utilities.execute_cmd(cmd)
finally:
if len(output) > 0:
self._logger.info(output)
def generate_surface_temperature(self):
"""Generates the Surface Temperature product
"""
options = self._parms['options']
cmd = None
if options['include_st']:
if options['st_algorithm'] == "single_channel":
cmd = ['surface_temperature.py',
'--xml', self._xml_filename,
'--keep-intermediate-data',
'--st_algorithm', options['st_algorithm'],
'--reanalysis', options['reanalysis_source']]
else: # Split Window
cmd = ['surface_temperature.py',
'--xml', self._xml_filename,
'--keep-intermediate-data',
'--st_algorithm', options['st_algorithm']]
cmd = ' '.join(cmd)
# Only if required
if cmd is not None:
self._logger.info(' '.join(['ST COMMAND:', cmd]))
output = ''
try:
output = utilities.execute_cmd(cmd)
finally:
if len(output) > 0:
self._logger.info(output)
def build_science_products(self):
"""Build the science products requested by the user
"""
# Nothing to do if the user did not specify anything to build
if not self._build_products:
return
self._logger.info('[LandsatProcessor] Building Science Products')
# Change to the working directory
current_directory = os.getcwd()
os.chdir(self._work_dir)
try:
self.convert_to_raw_binary()
self.clip_band_misalignment()
self.generate_elevation_product()
self.generate_pixel_qa()
self.generate_sr_products()
self.generate_dilated_cloud()
self.generate_cfmask_water_detection()
self.generate_spectral_indices()
self.generate_surface_water_extent()
self.generate_surface_temperature()
self.generate_aquatic_reflectance()
finally:
# Change back to the previous directory
os.chdir(current_directory)
    def remove_products_from_xml(self):
        """Remove the specified products from the XML file.

        The file is read into memory, processed, and written back out without
        the specified products.  Specific for Landsat products.
        """
        # Nothing to do if the user did not specify anything to build
        if not self._build_products:
            return

        options = self._parms['options']

        # Map order options to the products in the XML files
        order2product = {
            'source_data': ['L1T', 'L1G', 'L1TP', 'L1GT', 'L1GS'],
            'include_sr': 'sr_refl',
            'include_sr_toa': 'toa_refl',
            'include_sr_thermal': 'toa_bt',
            'angle_bands': 'angle_bands',
            'keep_intermediate_data': 'intermediate_data'
        }

        # If nothing to do just return
        if self._xml_filename is None:
            return

        # Remove generated products that were not requested
        products_to_remove = []
        if not options['include_customized_source_data']:
            products_to_remove.extend(
                order2product['source_data'])
        if not options['include_sr']:
            products_to_remove.append(
                order2product['include_sr'])
        if not options['include_sr_toa']:
            products_to_remove.append(
                order2product['include_sr_toa'])
            # Angle bands are only kept together with the TOA product.
            products_to_remove.append(
                order2product['angle_bands'])
        if not options['include_sr_thermal']:
            products_to_remove.append(
                order2product['include_sr_thermal'])
        if not options['keep_intermediate_data']:
            products_to_remove.append(
                order2product['keep_intermediate_data'])

        # Always remove the elevation data
        products_to_remove.append('elevation')

        # NOTE(review): this guard is always true -- products_to_remove is a
        # list and always contains at least 'elevation'.
        if products_to_remove is not None:
            # Create and load the metadata object
            espa_metadata = Metadata(xml_filename=self._xml_filename)

            # Search for and remove the items.
            # NOTE(review): bands are removed while iterating the band list;
            # this relies on the metadata object's iterator tolerating
            # removal -- confirm against the Metadata implementation.
            for band in espa_metadata.xml_object.bands.band:
                if band.attrib['product'] in products_to_remove:
                    # Business logic to always keep the radsat_qa band if bt,
                    # or toa, or sr output was chosen
                    if (band.attrib['name'] == 'radsat_qa' and
                            (options['include_sr'] or options['include_sr_toa'] or
                             options['include_sr_thermal'])):
                        continue
                    else:
                        self.remove_band_from_xml(band)

            # Validate the XML
            espa_metadata.validate()

            # Write it to the XML file
            espa_metadata.write(xml_filename=self._xml_filename)

            del espa_metadata
def cleanup_work_dir(self):
"""Cleanup all the intermediate non-products and the science
products not requested
"""
product_id = self._parms['product_id']
options = self._parms['options']
# Define intermediate files that need to be removed before product
# tarball generation
intermediate_files = [
'lndsr.*.txt',
'lndcal.*.txt',
'LogReport*',
'*_elevation.*'
]
# Define L1 source files that may need to be removed before product
# tarball generation
l1_source_files = [
'L*.TIF',
'README.GTF',
'*gap_mask*',
'L*_GCP.txt',
'L*_VER.jpg',
'L*_VER.txt',
]
# Define source metadata files that can be removed if not requested
# before product tarball generation
metadata_files = [
'*_ANG.txt',
'*_MTL.txt'
]
# Change to the working directory
current_directory = os.getcwd()
os.chdir(self._work_dir)
try:
non_products = []
# Remove the intermediate non-product files
if not options['keep_intermediate_data']:
for item in intermediate_files:
non_products.extend(glob.glob(item))
# Add level 1 source files if not requested
if not options['include_source_data']:
for item in l1_source_files:
non_products.extend(glob.glob(item))
# Add source metadata files if not requested and no source data included in the order
if not options['include_source_metadata']:
if not options['include_customized_source_data']:
for item in metadata_files:
non_products.extend(glob.glob(item))
if len(non_products) > 0:
cmd = ' '.join(['rm', '-rf'] + non_products)
self._logger.info(' '.join(['REMOVING INTERMEDIATE DATA'
' COMMAND:', cmd]))
output = ''
try:
output = utilities.execute_cmd(cmd)
finally:
if len(output) > 0:
self._logger.info(output)
self.remove_products_from_xml()
finally:
# Change back to the previous directory
os.chdir(current_directory)
def generate_statistics(self):
"""Generates statistics if required for the processor
"""
options = self._parms['options']
# Nothing to do if the user did not specify anything to build
if not self._build_products or not options['include_statistics']:
return
# Generate the stats for each stat'able' science product
# Hold the wild card strings in a type based dictionary
files_to_search_for = dict()
# Landsat files (Includes L4-L8)
# The types must match the types in settings.py
files_to_search_for['SR'] = ['*_sr_band[0-9].img']
files_to_search_for['TOA'] = ['*_toa_band[0-9].img']
files_to_search_for['BT'] = ['*_bt_band6.img',
'*_bt_band1[0-1].img']
files_to_search_for['INDEX'] = ['*_nbr.img', '*_nbr2.img',
'*_ndmi.img', '*_ndvi.img',
'*_evi.img', '*_savi.img',
'*_msavi.img']
files_to_search_for['LANDSAT_ST'] = ['*_st.img']
files_to_search_for['AR'] = ['*_ar_band[0-5].img']
# Build a command line arguments list
cmd = ['espa_statistics.py',
'--work_directory', self._work_dir,
"--files_to_search_for '{}'".format(json.dumps(files_to_search_for))]
# Turn the list into a string
cmd = ' '.join(cmd)
self._logger.info(' '.join(['SUMMARY LANDSAT STATISTICS COMMAND:', cmd]))
output = ''
try:
output = utilities.execute_cmd(cmd)
finally:
if len(output) > 0:
self._logger.info(output)
def get_product_name(self):
"""Build the product name from the product information and current
time
"""
if self._product_name is None:
product_id = self._parms['product_id']
# Get the current time information
ts = datetime.datetime.today()
# Extract stuff from the product information
product_prefix = sensor.info(product_id).product_prefix
product_name = ('{0}-SC{1}{2}{3}{4}{5}{6}'
.format(product_prefix,
str(ts.year).zfill(4),
str(ts.month).zfill(2),
str(ts.day).zfill(2),
str(ts.hour).zfill(2),
str(ts.minute).zfill(2),
str(ts.second).zfill(2)))
self._product_name = product_name
return self._product_name
class LandsatTMProcessor(LandsatProcessor):
    """Implements TM specific processing.

    Note: all processing is currently inherited from LandsatProcessor;
    the TM and ETM processors are identical today.
    """

    def __init__(self, cfg, parms):
        # Explicit base-class call; single inheritance, so this is
        # equivalent to the corresponding super() call.
        LandsatProcessor.__init__(self, cfg, parms)
class LandsatETMProcessor(LandsatProcessor):
    """Implements ETM specific processing.

    Note: all processing is currently inherited from LandsatProcessor;
    the TM and ETM processors are identical today.
    """

    def __init__(self, cfg, parms):
        # Explicit base-class call; single inheritance, so this is
        # equivalent to the corresponding super() call.
        LandsatProcessor.__init__(self, cfg, parms)
class LandsatOLITIRSProcessor(LandsatProcessor):
    """Implements OLITIRS (LC8) specific processing."""

    def __init__(self, cfg, parms):
        super(LandsatOLITIRSProcessor, self).__init__(cfg, parms)

    def validate_parameters(self):
        """Validates the parameters required for the processor."""
        # Base-class validation first.
        super(LandsatOLITIRSProcessor, self).validate_parameters()

        self._logger.info('Validating [LandsatOLITIRSProcessor] parameters')

        # NOTE(review): currently unused; kept for parity with the other
        # processors' validate_parameters implementations.
        options = self._parms['options']

    def sr_command_line(self):
        """Returns the command line required to generate surface reflectance."""
        options = self._parms['options']

        cmd = ['surface_reflectance.py', '--xml', self._xml_filename,
               '--write_toa']
        if not self.requires_sr_input:
            # No ordered product needs SR, so skip the SR computation.
            cmd.extend(['--process_sr', 'False'])
        return ' '.join(cmd)
class LandsatOLIProcessor(LandsatOLITIRSProcessor):
    """Implements OLI only (LO8) specific processing."""

    def __init__(self, cfg, parms):
        super(LandsatOLIProcessor, self).__init__(cfg, parms)

    def validate_parameters(self):
        """Validates the parameters required for the processor."""
        super(LandsatOLIProcessor, self).validate_parameters()

        self._logger.info('Validating [LandsatOLIProcessor] parameters')

        options = self._parms['options']

        # These products cannot be produced from OLI-only data; reject them
        # in the same order as before.
        unavailable = (
            ('include_sr', 'include_sr is an unavailable product option'
                           ' for OLI-Only data'),
            ('include_sr_thermal', 'include_sr_thermal is an unavailable product'
                                   ' option for OLI-Only data'),
            ('include_dswe', 'include_dswe is an unavailable product option'
                             ' for OLI-Only data'),
        )
        for option_name, message in unavailable:
            # Mirror the original identity test: only an explicit True value
            # is rejected.
            if options[option_name] is True:
                raise ESPAException(message)

    def generate_spectral_indices(self):
        """Spectral Indices processing requires surface reflectance products
        as input.

        Since SR products can not be produced with OLI only data, OLI only
        processing can not produce spectral indices.
        """
        pass
class ModisProcessor(CDRProcessor):
"""Implements the common processing between all of the MODIS
processors
"""
    def __init__(self, cfg, parms):
        """Initialize the MODIS processor."""
        super(ModisProcessor, self).__init__(cfg, parms)
        # Set by stage_input_data() once the source HDF has been downloaded.
        self._hdf_filename = None
def validate_parameters(self):
"""Validates the parameters required for the processor
"""
# Call the base class parameter validation
super(ModisProcessor, self).validate_parameters()
self._logger.info('Validating [ModisProcessor] parameters')
options = self._parms['options']
# Force these parameters to false if not provided
# They are the required includes for product generation
required_includes = ['include_customized_source_data',
'include_source_data',
'include_statistics']
for parameter in required_includes:
if not parameters.test_for_parameter(options, parameter):
self._logger.warning('[{}] parameter missing defaulting to'
' False'.format(parameter))
options[parameter] = False
# Determine if we need to build products
if not options['include_customized_source_data'] and not options['include_modis_ndvi']:
self._logger.info('***NO CUSTOMIZED PRODUCTS CHOSEN***')
self._build_products = False
else:
self._build_products = True
def stage_input_data(self):
"""Stages the input data required for the processor
"""
product_id = self._parms['product_id']
download_url = self._parms['download_url']
file_name = ''.join([product_id,
settings.MODIS_INPUT_FILENAME_EXTENSION])
staged_file = os.path.join(self._stage_dir, file_name)
# Download the source data
transfer.download_file_url(download_url, staged_file)
self._hdf_filename = os.path.basename(staged_file)
work_file = os.path.join(self._work_dir, self._hdf_filename)
# Copy the staged data to the work directory
shutil.copyfile(staged_file, work_file)
os.unlink(staged_file)
def convert_to_raw_binary(self):
"""Converts the MODIS input data to our internal raw binary
format
"""
options = self._parms['options']
# Build a command line arguments list
cmd = ['convert_modis_to_espa',
'--hdf', self._hdf_filename]
if not options['include_source_data']:
cmd.append('--del_src_files')
# Turn | |
ASE2020 Performance')
MlPrediction(x_train, y_train, x_test, y_test, y_pred_bats=y_preds, test_case_similarity_list=test_case_similarity_list, algorithm='lr', comparison=ASE2020, cutoff=cut_off).predict()
MlPrediction(x_train, y_train, x_test, y_test, y_pred_bats=y_preds, test_case_similarity_list=test_case_similarity_list, algorithm='rf', comparison=ASE2020, cutoff=cut_off).predict()
with open('./patch'+str(len(patch1278_list))+'.txt', 'w+') as f:
for p in patch1278_list:
f.write(p + '\n')
if patchsim:
print('------')
print('Evaluating PatchSim improvement')
y_combine, y_combine_trues = [], []
y_patchsim = []
BATs_cnt = 0
with open('patch325_result.txt', 'r+') as f_patchsim:
for line in f_patchsim:
line = line.strip()
name_ps, prediction_ps = line.split(',')[0], line.split(',')[1]
i = patch1278_list.index(name_ps)
y_combine_trues.append(y_trues[i])
y_patchsim.append(float(prediction_ps))
if test_case_similarity_list[i] >= 0.8:
y_combine.append(y_preds[i])
BATs_cnt += 1
else:
y_combine.append(float(prediction_ps))
print('BATs_cnt: {}, PatchSim_cnt: {}'.format(BATs_cnt, len(y_combine)-BATs_cnt))
self.evaluation_metrics(y_combine_trues, y_patchsim)
print('----------')
self.evaluation_metrics(y_combine_trues, y_combine)
'''
if patchsim:
print('------')
print('Evaluating Incorrect Excluded on PatchSim')
# [name, y_pred, y_true, y_pred_prob]
recommend_list_project = pd.DataFrame(sorted(recommend_list_project, key=lambda x: x[3], reverse=True))
Correct = recommend_list_project[recommend_list_project[2]==1]
filter_out_incorrect = recommend_list_project.shape[0] - Correct[:].index.tolist()[-1] - 1
print('Test data size: {}, Incorrect: {}, Correct: {}'.format(recommend_list_project.shape[0], recommend_list_project.shape[0]-Correct.shape[0],
Correct.shape[0]))
# print('Exclude incorrect: {}'.format(filter_out_incorrect))
# print('Exclude rate: {}'.format(filter_out_incorrect/(recommend_list_project.shape[0]-Correct.shape[0])))
# print('Excluded name: {}'.format(recommend_list_project.iloc[Correct[:].index.tolist()[-1]+1:][0].values))
# topHalf = recommend_list_project.iloc[:Correct[:].index.tolist()[-1] + 1]
# topHalfIncorrect = topHalf[topHalf[2] == 0][0].values
# print('Noe excluded name: {}'.format(topHalfIncorrect))
'''
self.statistics_box(box_projecs_co, box_projecs_inco, projects_name)
# def improve_ML(self, path_collected_patch=None, cut_off=0.8, distance_method = distance.cosine, kfold=10, algorithm='lr', method='combine'):
# print('Research Question 3: Improvement')
# projects = {'Chart': 26, 'Lang': 65, 'Math': 106, 'Time': 27}
# y_preds_bats, y_preds_prob_bats, y_trues = [], [], []
# x_all, y_all, x_test, y_test = [], [], [], []
# # comparison = 'ASE2020' # will make comparison if the value equals to 'ASE2020'
# mean_stand_dict = {0.0: [443, 816], 0.6: [273, 246], 0.7: [231, 273], 0.8: [180, 235], 0.9: [130, 130]}
# print('test case similarity cut-off: {}'.format(cut_off))
# unique_dict = []
# for project, number in projects.items():
# print('Testing {}'.format(project))
# for id in range(1, number + 1):
# print('----------------')
# print('{}_{}'.format(project, id))
# project_id = '_'.join([project, str(id)])
#
# # extract failed test index according to bug_id
# failed_test_index = [i for i in range(len(self.test_name)) if self.test_name[i].startswith(project_id+'-')]
# if failed_test_index == []:
# print('Couldnt find any failed test case for this bugid: {}'.format(project_id))
# # print('{} patches skipped'.format(len(available_path_patch)))
# continue
#
# # find paths of patches generated by tools
# available_path_patch = self.find_path_patch(path_collected_patch, project_id)
# if available_path_patch == []:
# print('No generated patches of APR tools found:{}'.format(project_id))
# continue
#
# # return vector according to available_path_patch
# name_list, label_list, generated_patch_list, vector_ML_list, vector_ODS_list = self.vector4patch(available_path_patch, compare=ASE2020,)
# if name_list == []:
# print('all the patches can not be recognized')
# continue
#
# # access the associated patch list(patch repository) of similar failed test cases
# associated_patch_list, scaler_patch, closest_score = self.get_associated_patch_list(failed_test_index, k=5, cut_off=cut_off, model=self.patch_w2v)
#
# # print('save train data for ML model of ASE2020')
# if ASE2020 and vector_ML_list != []:
# for i in range(len(vector_ML_list)):
# # if list(vector_list[i].astype(float)) != list(np.zeros(240).astype(float)):
# if vector_ML_list[i] in unique_dict:
# continue
# else:
# x_all.append(vector_ML_list[i])
# y_all.append(label_list[i])
#
# # calculate the center of associated patches(repository)
# if associated_patch_list == []:
# # fill value for the prediction of BATS to keep it the same length as ML prediction
# y_preds_bats += [-999 for i in range(len(vector_ML_list))]
# y_preds_prob_bats += [-999 for i in range(len(vector_ML_list))]
# y_trues += [i for i in label_list]
# else:
# centers = self.dynamic_threshold2(associated_patch_list, distance_method=distance_method, sumup='mean')
# for i in range(len(vector_ML_list)):
# name = name_list[i]
# tested_patch = generated_patch_list[i]
# y_true = label_list[i]
# # y_pred = self.predict_label(centers, threshold_list, vector_new_patch, scaler_patch)
# # y_pred_prob = self.predict_prob(centers, threshold_list, vector_new_patch, scaler_patch)
# y_pred_prob, y_pred = self.predict_recom(centers, tested_patch, scaler_patch, mean_stand_dict[cut_off], distance_method=distance_method,)
#
# if math.isnan(y_pred_prob):
# y_preds_bats.append(-999)
# y_preds_prob_bats.append(-999)
# y_trues.append(y_true)
# else:
# y_preds_bats.append(y_pred)
# y_preds_prob_bats.append(y_pred_prob)
# y_trues.append(y_true)
#
# # run cross validation for ML-based approach in ASE2020
# x_all_unique, y_all_unique, y_preds_prob_bats_unique = [], [], []
# for i in range(len(x_all)):
# if list(x_all[i]) in unique_dict:
# continue
# else:
# unique_dict.append(list(x_all[i]))
# x_all_unique.append(x_all[i])
# y_all_unique.append(y_all[i])
# y_preds_prob_bats_unique.append(y_preds_prob_bats[i])
# x_all_unique = np.array(x_all_unique)
# y_all_unique = np.array(y_all_unique)
# y_preds_prob_bats_unique = np.array(y_preds_prob_bats_unique)
# skf = StratifiedKFold(n_splits=kfold, shuffle=True)
# accs, prcs, rcs, f1s, aucs = list(), list(), list(), list(), list()
# rcs_p, rcs_n = list(), list()
# for train_index, test_index in skf.split(x_all_unique, y_all_unique):
# x_train, y_train = x_all_unique[train_index], y_all_unique[train_index]
# x_test, y_test = x_all_unique[test_index], y_all_unique[test_index]
#
# # prediction by BATs
# # y_pred_bats = y_preds_bats[test_index]
# y_test_pred_prob_bats = y_preds_prob_bats_unique[test_index]
#
# # standard data
# scaler = StandardScaler().fit(x_train)
# # scaler = MinMaxScaler().fit(x_train)
# x_train = scaler.transform(x_train)
# x_test = scaler.transform(x_test)
#
# print('\ntrain data: {}, test data: {}'.format(len(x_train), len(x_test)), end='')
#
# clf = None
# if algorithm == 'lr':
# clf = LogisticRegression(solver='lbfgs', class_weight={1: 1},).fit(X=x_train, y=y_train)
# elif algorithm == 'dt':
# clf = DecisionTreeClassifier().fit(X=x_train, y=y_train, sample_weight=None)
# elif algorithm == 'rf':
# clf = RandomForestClassifier(class_weight={1: 1}, ).fit(X=x_train, y=y_train)
#
# if method == 'combine':
# # combine both
# number_bats = 0
# number_ML = 0
# y_pred_final = []
# for i in range(len(y_test)):
#
# # apply BATs first
# if y_test_pred_prob_bats[i] != -999:
# number_bats += 1
# y_pred_final.append(y_test_pred_prob_bats[i])
# else:
# number_ML += 1
# y_test_pred_prob_ML = clf.predict_proba(x_test[i].reshape(1,-1))[:, 1]
# y_pred_final.append(y_test_pred_prob_ML)
#
# # y_pred_final.append((y_test_pred_prob_bats[i] + clf.predict_proba(x_test[i].reshape(1,-1))[:, 1])/2.0)
# print('\nNumber of BATs and ML: {} {}'.format(number_bats, number_ML))
# else:
# y_pred_final = clf.predict_proba(x_test)[:, 1]
#
# # print('{}: '.format(algorithm))
# recall_p, recall_n, acc, prc, rc, f1, auc_, _ = self.evaluation_metrics(list(y_test), y_pred_final)
#
# accs.append(acc)
# prcs.append(prc)
# rcs.append(rc)
# f1s.append(f1)
#
# aucs.append(auc_)
# rcs_p.append(recall_p)
# rcs_n.append(recall_n)
#
# print('\n{}-fold cross validation mean: '.format(kfold))
# print('Accuracy: {:.1f} -- Precision: {:.1f} -- +Recall: {:.1f} -- F1: {:.1f} -- AUC: {:.3f}'.format(np.array(accs).mean() * 100, np.array(prcs).mean() * 100, np.array(rcs).mean() * 100, np.array(f1s).mean() * 100, np.array(aucs).mean()))
# print('AUC: {:.3f}, +Recall: {:.3f}, -Recall: {:.3f}'.format(np.array(aucs).mean(), np.array(rcs_p).mean(), np.array(rcs_n).mean()))
#
def predict(self, patch_list, new_patch, scaler_patch):
if self.patch_w2v != 'string':
new_patch = scaler_patch.transform(new_patch.reshape((1, -1)))
dist_final = []
# patch list includes multiple patches for multi failed test cases
for y in range(len(patch_list)):
patches = patch_list[y]
dist_k = []
for z in range(len(patches)):
vec = patches[z]
# dist = np.linalg.norm(vec - new_patch)
if self.patch_w2v == 'string':
dist = Levenshtein.distance(vec[0], new_patch[0])
dist_k.append(dist)
else:
# choose method to calculate distance
dist = distance.cosine(vec, new_patch)
# dist = distance.euclidean(vec, new_patch)/(1 + distance.euclidean(vec, new_patch))
dist_k.append(dist)
dist_mean = np.array(dist_k).mean()
dist_min = np.array(dist_k).min()
# print('mean:{} min:{}'.format(dist_mean, dist_min))
dist_final.append(dist_min)
dist_final = np.array(dist_final).mean()
return dist_final
def dynamic_threshold(self, patch_list):
centers = []
threshold_list = []
# patch list includes multiple patches for multi failed test cases
for y in range(len(patch_list)):
patches = patch_list[y]
# threshold 1: center of patch list
center = np.array(patches).mean(axis=0)
dist_mean = np.array([distance.cosine(p, center) for p in patches]).mean()
# dist_mean = np.array([distance.cosine(p, center) for p in patches]).max()
score_mean = 1-dist_mean
centers.append(center)
threshold_list.append(score_mean)
return centers, threshold_list
def dynamic_threshold2(self, patch_list, distance_method=distance.euclidean, sumup='mean'):
# patch_list: [[top-5 patches for failed test case 1], [top-5 patches failed test case 2], [top-5 patches failed test case 3]]
if self.patch_w2v != 'string':
if len(patch_list) == 1:
center = patch_list[0].mean(axis=0)
# if sumup == 'mean':
# dist_mean = np.array([distance_method(p, center) for p in patch_list[0]]).mean()
# elif sumup == 'max':
# dist_mean = np.array([distance_method(p, center) for p in patch_list[0]]).max()
else:
# calculate center
patches = patch_list[0]
for i in range(1, len(patch_list)):
patches = np.concatenate((patches, patch_list[i]), axis=0)
center = patches.mean(axis=0)
# if sumup == 'mean':
# dist_mean = np.array([distance_method(p, center) for p in patches]).mean()
# elif sumup == 'max':
# dist_mean = np.array([distance_method(p, center) for p in patches]).max()
else:
return patch_list
return [center]
def predict_label(self, centers, threshold_list, new_patch, scaler_patch, ):
if self.patch_w2v != 'string':
new_patch = scaler_patch.transform(new_patch.reshape((1, -1)))
vote_list = []
# patch list includes multiple patches for multi failed test cases
for y in range(len(centers)):
center = centers[y]
score_mean = threshold_list[y]
# choose method to calculate distance
dist_new = distance.cosine(new_patch, center)
# dist_new = distance.euclidean(vec, new_patch)/(1 + distance.euclidean(vec, new_patch))
score_new = 1 - dist_new
vote_list.append(1 if score_new >= score_mean else 0)
if vote_list.count(1) >= len(centers) / 2.0:
return 1
else:
return 0
def predict_prob(self, centers, threshold_list, new_patch, scaler_patch, distance_method=distance.euclidean):
if self.patch_w2v != 'string':
new_patch | |
import os
from pathlib import Path
from gd.api.database import Database
from gd.async_utils import run_blocking
from gd.crypto import (
DEFAULT_ENCODING,
DEFAULT_ERRORS,
decode_os_save,
decode_save,
encode_os_save,
encode_save,
)
from gd.logging import get_logger
from gd.platform import LINUX, MACOS, WINDOWS
from gd.text_utils import make_repr
from gd.typing import Optional, Tuple, Union
# Public API of this module.
__all__ = ("MAIN", "LEVELS", "PATH", "SAVE_DELIM", "SaveManager", "create_database", "save")
# Convenience aliases for values accepted as raw text or filesystem paths.
AnyString = Union[bytes, str]
PathLike = Union[str, Path]
# File names Geometry Dash uses for the two parts of a local save.
MAIN = "CCGameManager.dat"
LEVELS = "CCLocalLevels.dat"
# Replaced below with the platform-specific save directory; stays the
# current directory if the platform can not be resolved.
PATH = Path()
# %LOCALAPPDATA% on Windows; the fallback matches the usual location.
LOCAL_APP_DATA = os.getenv("localappdata", "~/AppData/Local")
# Separator between entries in encoded save strings.
SAVE_DELIM = ";"
# Per-platform save directories (the Linux path is the Steam/Proton prefix).
WINDOWS_DIR = LOCAL_APP_DATA + "/GeometryDash"
MACOS_DIR = "~/Library/Application Support/GeometryDash"
LINUX_DIR = (
    "~/.steam/steam/steamapps/compatdata/322170/pfx"
    "/drive_c/users/steamuser/Local Settings/Application Data/GeometryDash"
)
log = get_logger(__name__)
# Resolve the platform-specific Geometry Dash save directory. Failure is
# non-fatal: PATH keeps its default value and the problem is only logged.
try:
    if WINDOWS:
        PATH = Path(WINDOWS_DIR).expanduser()
    elif MACOS:
        PATH = Path(MACOS_DIR).expanduser()
    elif LINUX:
        PATH = Path(LINUX_DIR).expanduser()
    else:
        raise OSError("Current platform is not supported.")
except Exception:  # noqa
    log.error("Can not find relevant GD PATH.", exc_info=True)
class SaveManager:
    def __repr__(self) -> str:
        """Return the canonical representation built by make_repr."""
        return make_repr(self)
    async def load_async(
        self,
        main: Optional[PathLike] = None,
        levels: Optional[PathLike] = None,
        main_file: PathLike = MAIN,
        levels_file: PathLike = LEVELS,
    ) -> Database:
        """Asynchronously load a save.

        This function is normally used for local GD management, e.g.::

            database = await gd.api.save.load_async()

        .. warning::
            ``main_file`` and ``levels_file`` can **NOT** be ``None``.

        Parameters
        ----------
        main: Optional[Union[:class:`str`, :class:`~pathlib.Path`]]
            Path to a file/directory containing main part of the save.
        levels: Optional[Union[:class:`str`, :class:`~pathlib.Path`]]
            Path to a file/directory containing levels part of the save.
        main_file: Union[:class:`str`, :class:`~pathlib.Path`]
            Path to a file containing main part of the save.
            Applied when ``main`` is a directory.
        levels_file: Union[:class:`str`, :class:`~pathlib.Path`]
            Path to a file containing levels part of the save.
            Applied when ``levels`` is a directory.

        Returns
        -------
        :class:`~gd.api.Database`
            Loaded Database. If any of the files is not found, an empty
            ``gd.api.Database()`` is returned.
        """
        # Run the blocking file I/O of local_load in the default executor.
        return await run_blocking(
            self.local_load, main=main, levels=levels, main_file=main_file, levels_file=levels_file,
        )
    def load(
        self,
        main: Optional[PathLike] = None,
        levels: Optional[PathLike] = None,
        main_file: PathLike = MAIN,
        levels_file: PathLike = LEVELS,
    ) -> Database:
        """Load a save (synchronous version of :meth:`load_async`).

        This function is normally used for local GD management, e.g.::

            database = gd.api.save.load()

        .. warning::
            ``main_file`` and ``levels_file`` can **NOT** be ``None``.

        Parameters
        ----------
        main: Optional[Union[:class:`str`, :class:`~pathlib.Path`]]
            Path to a file/directory containing main part of the save.
        levels: Optional[Union[:class:`str`, :class:`~pathlib.Path`]]
            Path to a file/directory containing levels part of the save.
        main_file: Union[:class:`str`, :class:`~pathlib.Path`]
            Path to a file containing main part of the save.
            Applied when ``main`` is a directory.
        levels_file: Union[:class:`str`, :class:`~pathlib.Path`]
            Path to a file containing levels part of the save.
            Applied when ``levels`` is a directory.

        Returns
        -------
        :class:`~gd.api.Database`
            Loaded Database. If any of the files is not found, an empty
            ``gd.api.Database()`` is returned.
        """
        # Delegate to the blocking implementation.
        return self.local_load(
            main=main, levels=levels, main_file=main_file, levels_file=levels_file
        )
    async def dump_async(
        self,
        database: Database,
        main: Optional[PathLike] = None,
        levels: Optional[PathLike] = None,
        main_file: PathLike = MAIN,
        levels_file: PathLike = LEVELS,
    ) -> None:
        """Asynchronously dump a save.

        This function is normally used for local GD management, e.g.::

            await gd.api.save.dump_async(database)

        .. warning::
            ``main_file`` and ``levels_file`` can **NOT** be ``None``.

        Parameters
        ----------
        database: :class:`~gd.api.Database`
            Database object to dump.
        main: Optional[Union[:class:`str`, :class:`~pathlib.Path`]]
            Path to a file/directory containing main part of the save.
        levels: Optional[Union[:class:`str`, :class:`~pathlib.Path`]]
            Path to a file/directory containing levels part of the save.
        main_file: Union[:class:`str`, :class:`~pathlib.Path`]
            Path to a file containing main part of the save.
            Applied when ``main`` is a directory.
        levels_file: Union[:class:`str`, :class:`~pathlib.Path`]
            Path to a file containing levels part of the save.
            Applied when ``levels`` is a directory.
        """
        # Run the blocking file I/O of local_dump in the default executor.
        await run_blocking(
            self.local_dump,
            database,
            main=main,
            levels=levels,
            main_file=main_file,
            levels_file=levels_file,
        )
    def dump(
        self,
        database: Database,
        main: Optional[PathLike] = None,
        levels: Optional[PathLike] = None,
        main_file: PathLike = MAIN,
        levels_file: PathLike = LEVELS,
    ) -> None:
        """Dump a save (synchronous version of :meth:`dump_async`).

        This function is normally used for local GD management, e.g.::

            gd.api.save.dump(database)

        .. warning::
            ``main_file`` and ``levels_file`` can **NOT** be ``None``.

        Parameters
        ----------
        database: :class:`~gd.api.Database`
            Database object to dump.
        main: Optional[Union[:class:`str`, :class:`~pathlib.Path`]]
            Path to a file/directory containing main part of the save.
        levels: Optional[Union[:class:`str`, :class:`~pathlib.Path`]]
            Path to a file/directory containing levels part of the save.
        main_file: Union[:class:`str`, :class:`~pathlib.Path`]
            Path to a file containing main part of the save.
            Applied when ``main`` is a directory.
        levels_file: Union[:class:`str`, :class:`~pathlib.Path`]
            Path to a file containing levels part of the save.
            Applied when ``levels`` is a directory.
        """
        # Delegate to the blocking implementation.
        self.local_dump(
            database, main=main, levels=levels, main_file=main_file, levels_file=levels_file
        )
async def to_string_async(
self,
database: Database,
apply_xor: bool = False,
follow_os: bool = False,
decode: bool = False,
) -> Union[Tuple[bytes, bytes], Tuple[str, str]]:
"""Asynchronously dump a save into strings.
This might be used when you need to transfer your save as a stream.
Parameters
----------
database: :class:`~gd.api.Database`
Database object to dump.
apply_xor: :class:`bool`
Whether to apply *XOR* function to given data (used by local saves mostly).
follow_os: :class:`bool`
Whether to use same encoding as in local saves on the given OS.
decode: :class:`bool`
Whether to convert :class:`bytes` to :class:`str` before returning.
Returns
-------
Union[Tuple[:class:`bytes`, :class:`bytes`], Tuple[:class:`str`, :class:`str`]]
A ``(main, levels)`` tuple, containing strings or bytes depending on ``decode``.
"""
main, levels = await run_blocking(
self.dump_parts, database, apply_xor=apply_xor, follow_os=follow_os
)
if decode:
return (
main.decode(DEFAULT_ENCODING, DEFAULT_ERRORS),
levels.decode(DEFAULT_ENCODING, DEFAULT_ERRORS),
)
return (main, levels)
def to_string(
self,
database: Database,
apply_xor: bool = False,
follow_os: bool = False,
decode: bool = False,
) -> Union[Tuple[bytes, bytes], Tuple[str, str]]:
"""Dump a save into strings.
This might be used when you need to transfer your save a stream.
Parameters
----------
database: :class:`~gd.api.Database`
Database object to dump.
apply_xor: :class:`bool`
Whether to apply *XOR* function to given data (used by local saves mostly).
follow_os: :class:`bool`
Whether to use same encoding as in local saves on the given OS.
decode: :class:`bool`
Whether to convert :class:`bytes` to :class:`str` before returning.
Returns
-------
Union[Tuple[:class:`bytes`, :class:`bytes`], Tuple[:class:`str`, :class:`str`]]
A ``(main, levels)`` tuple, containing strings or bytes depending on ``decode``.
"""
main, levels = self.dump_parts(database, apply_xor=apply_xor, follow_os=follow_os)
if decode:
return (
main.decode(DEFAULT_ENCODING, DEFAULT_ERRORS),
levels.decode(DEFAULT_ENCODING, DEFAULT_ERRORS),
)
return (main, levels)
    async def from_string_async(
        self,
        main: AnyString = "",
        levels: AnyString = "",
        apply_xor: bool = False,
        follow_os: bool = False,
    ) -> Database:
        """Asynchronously load a save from strings.

        Parameters
        ----------
        main: Union[:class:`bytes`, :class:`str`]
            A string containing the encoded main part of the save.
        levels: Union[:class:`bytes`, :class:`str`]
            A string containing the encoded levels part of the save.
        apply_xor: :class:`bool`
            Whether to apply *XOR* to the data (used by local saves mostly).
        follow_os: :class:`bool`
            Whether to use the same decoding as local saves on this OS.

        Returns
        -------
        :class:`~gd.api.Database`
            Database object containing loaded data.
        """
        # Run the (potentially slow) decoding in the default executor.
        return await run_blocking(
            self.load_parts, main=main, levels=levels, apply_xor=apply_xor, follow_os=follow_os,
        )
    def from_string(
        self,
        main: AnyString = "",
        levels: AnyString = "",
        apply_xor: bool = False,
        follow_os: bool = False,
    ) -> Database:
        """Load a save from strings (synchronous version of
        :meth:`from_string_async`).

        Parameters
        ----------
        main: Union[:class:`bytes`, :class:`str`]
            A string containing the encoded main part of the save.
        levels: Union[:class:`bytes`, :class:`str`]
            A string containing the encoded levels part of the save.
        apply_xor: :class:`bool`
            Whether to apply *XOR* to the data (used by local saves mostly).
        follow_os: :class:`bool`
            Whether to use the same decoding as local saves on this OS.

        Returns
        -------
        :class:`~gd.api.Database`
            Database object containing loaded data.
        """
        # Delegate to the blocking implementation.
        return self.load_parts(main=main, levels=levels, apply_xor=apply_xor, follow_os=follow_os)
    def create_database(
        self, main: AnyString = "", levels: AnyString = ""  # type: ignore
    ) -> Database:
        """Create a database from string parts.

        Use this when you already have XML strings, or as a more readable
        alternative to creating ``gd.api.Database()`` directly::

            database = gd.api.save.create_database()  # or supply arguments

        Parameters
        ----------
        main: Union[:class:`bytes`, :class:`str`]
            A string containing the main XML part of the save.
        levels: Union[:class:`bytes`, :class:`str`]
            A string containing the levels XML part of the save.

        Returns
        -------
        :class:`~gd.api.Database`
            Database object containing loaded data.
        """
        return Database(main, levels)
def decode_stream(
self, stream: bytes, apply_xor: bool = True, follow_os: bool = True
) -> bytes:
decoder = decode_os_save if follow_os else decode_save
return decoder(stream, apply_xor=apply_xor)
def encode_stream(
self, stream: bytes, apply_xor: | |
# -*- coding: utf-8 -*-
import hashlib
import logging
import os
import warnings
from collections.abc import MutableMapping
from pathlib import Path
from typing import Callable, Dict, List, Optional, Union
import imageio
import numpy as np
from matplotlib import cm, colors, patches
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from skimage import transform
from eyepy.core import config
from eyepy.core.drusen import DefaultDrusenFinder, DrusenFinder
from eyepy.core.quantifier import DefaultEyeQuantifier, EyeQuantifier
logger = logging.getLogger(__name__)
class Meta(MutableMapping):
    """A dict with lazy values and a printable representation.

    Values may be stored as zero-argument callables; such a value is
    resolved on first access and the result replaces the callable. This
    enables lazy loading of OCT meta data: nothing is read from the file
    until the corresponding field is accessed.

    Instances are created like ordinary dictionaries, for example:

    + Meta({"SizeX": 512})
    + Meta(SizeX=512)
    + Meta([(SizeX, 512), (SizeY, 512)])
    """

    def __init__(self, *args, **kwargs):
        self._store = dict()
        # MutableMapping.update routes everything through __setitem__.
        self.update(dict(*args, **kwargs))

    def __getitem__(self, key):
        entry = self._store[key]
        if callable(entry):
            # Lazy field: materialize and cache the value.
            self[key] = entry()
        return self._store[key]

    def __setitem__(self, key, value):
        self._store[key] = value

    def __delitem__(self, key):
        del self._store[key]

    def __iter__(self):
        return iter(self._store)

    def __len__(self):
        return len(self._store)

    def __str__(self):
        # "__empty" is an internal placeholder field and is never shown.
        shown = [f"{field}: {self[field]}" for field in self if field != "__empty"]
        return os.linesep.join(shown)

    def __repr__(self):
        return self.__str__()
class EnfaceImage:
    """Lazy wrapper around a localizer/enface image and its filename."""

    def __init__(self, data, name=None):
        self._data = data
        self._name = name

    @property
    def data(self):
        """Return the enface image as numpy array, resolving lazy loaders."""
        if callable(self._data):
            # First access: replace the loader with the loaded image.
            self._data = self._data()
        return self._data

    @property
    def name(self):
        """Filename of the image; raises ValueError when none was given."""
        if self._name is None:
            raise ValueError("This EnfaceImage has no respective filename")
        return self._name
class Annotation(MutableMapping):
    """Mutable mapping of B-Scan annotations with lazy values.

    A value stored as a callable is resolved on first access by calling
    it with the owning B-Scan, and the result replaces the callable.
    """

    def __init__(self, *args, **kwargs):
        self._store = dict()
        # MutableMapping.update routes everything through __setitem__.
        self.update(dict(*args, **kwargs))
        self._bscan = None

    def __getitem__(self, key):
        entry = self._store[key]
        if callable(entry):
            # Lazy annotation: materialize it using the attached B-Scan.
            self[key] = entry(self.bscan)
        return self._store[key]

    def __setitem__(self, key, value):
        self._store[key] = value

    def __delitem__(self, key):
        del self._store[key]

    def __iter__(self):
        return iter(self._store)

    def __len__(self):
        return len(self._store)

    @property
    def bscan(self):
        """The B-Scan this annotation belongs to; raises when not attached."""
        if self._bscan is None:
            raise AttributeError("bscan is not set for this Annotation.")
        return self._bscan

    @bscan.setter
    def bscan(self, value: "Bscan"):
        self._bscan = value
class LayerAnnotation(MutableMapping):
    """Mapping from layer names to per-A-scan height arrays.

    Wraps a 2D array of shape (layer, ascan) — or a callable returning it
    for lazy loading — and exposes the rows by layer name via a
    name -> row-index mapping. Heights outside [0, max_height] are
    reported as NaN.
    """

    def __init__(self, data, layername_mapping=None, max_height=2000):
        # data: 2D (layer, ascan) height array, or a callable returning it.
        self._data = data
        self.max_height = max_height
        if layername_mapping is None:
            self.mapping = config.SEG_MAPPING
        else:
            self.mapping = layername_mapping

    @property
    def data(self):
        """Resolve and cache the underlying height array on first access."""
        if callable(self._data):
            self._data = self._data()
        return self._data

    def __getitem__(self, key):
        data = self.data[self.mapping[key]]
        # Mark out-of-range heights invalid. NaN comparisons yield False,
        # so NaN entries simply stay NaN; the errstate guard silences the
        # invalid-value warning those comparisons would emit.
        # (The original used np.less/np.greater with `where=~nans` but no
        # `out=`, which leaves the masked entries uninitialized.)
        with np.errstate(invalid="ignore"):
            out_of_range = (data < 0) | (data > self.max_height)
        data = np.copy(data)
        data[out_of_range] = np.nan
        # NOTE(review): an all-zero (or empty) layer is treated as absent
        # here — confirm this is intended.
        if np.nansum(data) > 0:
            return data
        raise KeyError(f"There is no data given for the {key} layer")

    def __setitem__(self, key, value):
        self.data[self.mapping[key]] = value

    def __delitem__(self, key):
        # Removing a layer means voiding its heights, not shrinking the array.
        self.data[self.mapping[key], :] = np.nan

    def __iter__(self):
        inv_map = {v: k for k, v in self.mapping.items()}
        return iter(inv_map.values())

    def __len__(self):
        # BUG FIX: the original returned len(self.data.shape[0]) — len()
        # of an int — which raises TypeError. Return the layer count.
        return self.data.shape[0]

    def layer_indices(self, key):
        """Return (row, col) pixel indices of the annotated layer curve."""
        layer = self[key]
        nan_indices = np.isnan(layer)
        col_indices = np.arange(len(layer))[~nan_indices]
        row_indices = np.rint(layer).astype(int)[~nan_indices]
        return (row_indices, col_indices)
class Bscan:
    def __new__(
        cls,
        data,
        annotation=None,
        meta=None,
        data_processing=None,
        oct_obj=None,
        name=None,
        *args,
        **kwargs,
    ):
        # Make all meta fields accessible as attributes of the BScan without
        # reading them up front: each field becomes a property that looks
        # the value up in self.meta on access (lazy).
        def meta_func_builder(x):
            # Bind x per field; closing over the loop variable directly
            # would make every property return the last key (late binding).
            return lambda self: self.meta[x]
        if meta is not None:
            for key in meta:
                # NOTE(review): setattr on cls mutates the class itself, so
                # the injected properties are shared by ALL Bscan instances —
                # confirm that differing meta layouts are never mixed.
                setattr(cls, key, property(meta_func_builder(key)))
        return object.__new__(cls, *args, **kwargs)
    def __init__(
        self,
        data: Union[np.ndarray, Callable],
        annotation: Optional[Annotation] = None,
        meta: Optional[Union[Dict, Meta]] = None,
        data_processing: Optional[Callable] = None,
        oct_obj: Optional["Oct"] = None,
        name: Optional[str] = None,
    ):
        """A single B-Scan of an OCT volume.

        Parameters
        ----------
        data : A numpy array holding the raw B-Scan data, or a callable
            which returns such an array (lazy loading). Raw means the
            unprocessed stored data; the actual dtype and value range
            depend on the storage format.
        annotation : Object holding the B-Scan annotations.
        meta : A dict or Meta object holding the B-Scan meta information.
        data_processing : Callable applied to the raw data to produce the
            display-contrast scan; defaults to the identity.
        oct_obj : Reference to the OCT volume holding the B-Scan.
        name : Filename of the B-Scan if it is saved as an individual file.
        """
        self._scan_raw = data
        self._scan = None
        self._meta = meta
        self._oct_obj = oct_obj
        self._annotation = annotation
        if data_processing is None:
            # Identity: the raw scan already has the desired contrast.
            self._data_processing = lambda x: x
        else:
            self._data_processing = data_processing
        self._name = name
    @property
    def oct_obj(self):
        """The OCT volume this B-Scan belongs to; raises when not attached."""
        if self._oct_obj is None:
            raise AttributeError("oct_obj is not set for this Bscan object")
        return self._oct_obj
    @oct_obj.setter
    def oct_obj(self, value):
        # Set by the owning volume when the B-Scan is registered.
        self._oct_obj = value
    @property
    def name(self):
        """Identifier of the B-Scan; defaults to its index in the volume."""
        if self._name is None:
            self._name = str(self.index)
        return self._name
    @property
    def index(self):
        """Position of this B-Scan in the parent volume's bscan list."""
        return self.oct_obj.bscans.index(self)
    @property
    def meta(self):
        """A dict holding all Bscan meta data (None if none was supplied)."""
        return self._meta
    @property
    def annotation(self):
        """A dict holding all Bscan annotation data."""
        if self._annotation is None:
            # No annotations yet: start with an empty container.
            self._annotation = Annotation({})
        elif callable(self._annotation):
            # Lazy annotation loader: resolve it on first access.
            self._annotation = self._annotation()
        # (Re)attach so lazy entries inside the Annotation can reach this scan.
        self._annotation.bscan = self
        return self._annotation
@property
def scan_raw(self):
    """Single raw B-Scan array, loaded lazily when given as a callable.

    The dtype is not changed after import; if available this is the
    unprocessed output of the OCT device.
    """
    raw = self._scan_raw
    if callable(raw):
        raw = raw()
        self._scan_raw = raw  # cache the loaded array
    return raw
@property
def scan(self):
    """Processed B-Scan in the commonly used contrast.

    Computed once from the raw scan via the configured processing
    function and then cached (dtype ubyte, values 0-255).
    """
    cached = self._scan
    if cached is None:
        cached = self._data_processing(self.scan_raw)
        self._scan = cached
    return cached
@property
def shape(self):
    """Shape of the processed B-Scan array."""
    scan_arr = self.scan
    return scan_arr.shape
@property
def layers(self):
    """Layer annotation of this B-Scan, created empty on first access."""
    ann = self.annotation
    if "layers" not in ann:
        # Empty height map: one row per known layer, one column per A-Scan.
        empty = np.zeros(
            (max(config.SEG_MAPPING.values()) + 1, self.oct_obj.SizeX)
        )
        ann["layers"] = LayerAnnotation(empty)
    layer_ann = ann["layers"]
    if callable(layer_ann):
        layer_ann = layer_ann()
        ann["layers"] = layer_ann
    return layer_ann
@property
def drusen_raw(self):
    """Drusen computed from the RPE and BM layer segmentation.

    The raw drusen are computed based on single B-Scans.
    """
    volume_drusen = self._oct_obj.drusen_raw
    return volume_drusen[..., self.index]
@property
def drusen(self):
    """Filtered drusen for this B-Scan.

    Filtering is performed on the complete volume.
    """
    volume_drusen = self._oct_obj.drusen
    return volume_drusen[..., self.index]
def plot(
    self,
    ax=None,
    layers=None,
    drusen=False,
    layers_kwargs=None,
    layers_color=None,
    annotation_only=False,
    region=np.s_[:, :],
):
    """Plot B-Scan with segmented Layers.

    Parameters
    ----------
    ax : matplotlib axes to draw on; defaults to ``plt.gca()``.
    layers : iterable of layer names to draw, or the string "all";
        None draws no layers.
    drusen : if True, overlay the (volume-filtered) drusen mask in red.
    layers_kwargs : plot kwargs merged over ``config.layers_kwargs``.
    layers_color : layer-name -> color mapping merged over
        ``config.layers_color``.
    annotation_only : if True, skip drawing the B-Scan image itself.
    region : 2D numpy slice expression restricting the plotted area.
    """
    if ax is None:
        ax = plt.gca()
    # Complete region index expression: fill missing start/stop values with
    # the scan extent so the slice arithmetic below is well defined.
    if region[0].start is None:
        r0_start = 0
    else:
        r0_start = region[0].start
    if region[1].start is None:
        r1_start = 0
    else:
        r1_start = region[1].start
    if region[0].stop is None:
        r0_stop = self.shape[0]
    else:
        r0_stop = region[0].stop
    if region[1].stop is None:
        r1_stop = self.shape[1]
    else:
        r1_stop = region[1].stop
    region = np.s_[r0_start:r0_stop, r1_start:r1_stop]
    if layers is None:
        layers = []
    elif layers == "all":
        layers = self.layers.keys()
    # User-supplied kwargs/colors extend and override the configured defaults.
    if layers_kwargs is None:
        layers_kwargs = config.layers_kwargs
    else:
        layers_kwargs = {**config.layers_kwargs, **layers_kwargs}
    if layers_color is None:
        layers_color = config.layers_color
    else:
        layers_color = {**config.layers_color, **layers_color}
    if not annotation_only:
        ax.imshow(self.scan[region], cmap="gray")
    if drusen:
        # The boolean drusen mask doubles as the alpha channel, so only
        # drusen pixels are visible on top of the scan.
        visible = np.zeros(self.drusen.shape)
        visible[self.drusen] = 1.0
        ax.imshow(self.drusen[region], alpha=visible[region], cmap="Reds")
    for layer in layers:
        color = layers_color[layer]
        try:
            layer_data = self.layers[layer]
            # Adjust layer height to plotted region
            layer_data = layer_data - region[0].start
            # Remove layer if outside of region
            layer_data = layer_data[region[1].start : region[1].stop]
            # Clamp heights into the plotted region's vertical range.
            layer_data[layer_data < 0] = 0
            region_height = region[0].stop - region[0].start
            layer_data[layer_data > region_height] = region_height
            ax.plot(
                layer_data, color=color, label=layer, **layers_kwargs,
            )
        except KeyError:
            warnings.warn(f"Layer '{layer}' has no Segmentation", UserWarning)
class Oct:
""".vol header
-----------
All fields from the .vol header (the oct meta data) can be accessed as attributes of the
HeyexOct object.
SLO
---
The attribute `slo` of the HeyexOct object gives access to the IR SLO image
and returns it as a numpy.ndarray of dtype `uint8`.
B-Scans
-------
Individual B-Scans can be accessed using `oct_scan[index]`. The returned
HeyexBscan object exposes all B-Scan header fields as attributes and the
raw B-Scan image as `numpy.ndarray` of type `float32` under the attribute
`scan_raw`. A transformed version of the raw B-Scan which is more similar to
the Heyex experience can be accessed with the attribute `scan` and returns
the 4th root of the raw B-Scan scaled to [0,255] as `uint8`.
Segmentations
-------------
B-Scan segmentations can be accessed for individual B-Scans like
`bscan.segmentation`. This return a numpy.ndarray of shape | |
#!/usr/bin/env python3
"""
Module to implement the Modified Seminario Method
Originally written by <NAME>, TCM, University of Cambridge
Modified by <NAME> and rewritten by <NAME>, Newcastle University
Reference using AEA Allen, MC Payne, DJ Cole, J. Chem. Theory Comput. (2018), doi:10.1021/acs.jctc.7b00785
"""
from QUBEKit.utils import constants
from QUBEKit.utils.decorators import for_all_methods, timer_logger
from operator import itemgetter
import numpy as np
class ModSemMaths:
    """Static helpers implementing the maths of the modified Seminario method."""

    def __repr__(self):
        return "{}({!r})".format(type(self).__name__, self.__dict__)

    @staticmethod
    def unit_vector_normal_to_bond(u_bc, u_ab):
        """Unit vector perpendicular to the plane spanned by u_bc and u_ab."""
        normal = np.cross(u_bc, u_ab)
        return normal / np.linalg.norm(normal)

    @staticmethod
    def unit_vector_along_bond(coords, bond):
        """Unit vector pointing from the first to the second atom of bond."""
        start, end = bond
        vec = coords[end] - coords[start]
        return vec / np.linalg.norm(vec)

    @staticmethod
    def u_pa_from_angles(angle, coords):
        """Vector in the plane of angle abc and perpendicular to the a-b bond."""
        a, b, c = angle
        u_ab = ModSemMaths.unit_vector_along_bond(coords, (a, b))
        u_cb = ModSemMaths.unit_vector_along_bond(coords, (c, b))
        u_n = ModSemMaths.unit_vector_normal_to_bond(u_cb, u_ab)
        return ModSemMaths.unit_vector_normal_to_bond(u_n, u_ab)

    @staticmethod
    def dot_product(u_pa, eig_ab):
        """Dot product of u_pa with the complex conjugate of eig_ab."""
        return sum(u_pa[k] * eig_ab[k].conjugate() for k in range(3))

    @staticmethod
    def force_constant_bond(bond, eigenvals, eigenvecs, coords):
        """Force Constant - Equation 10 of Seminario paper - gives force constant for bond."""
        a, b = bond
        vals_ab = eigenvals[a, b, :]
        vecs_ab = eigenvecs[:, :, a, b]
        u_ab = ModSemMaths.unit_vector_along_bond(coords, bond)
        # Project each eigenvector onto the bond direction, weight by eigenvalue.
        return -0.5 * sum(vals_ab[k] * abs(np.dot(u_ab, vecs_ab[:, k])) for k in range(3))

    @staticmethod
    def force_constant_angle(angle, bond_lens, eigenvals, eigenvecs, coords, scalings):
        """
        Force Constant - Equation 14 of Seminario paper - gives force constant for angle
        (in kcal/mol/rad^2) and equilibrium angle (in degrees).
        """
        a, b, c = angle
        u_ab = ModSemMaths.unit_vector_along_bond(coords, (a, b))
        u_cb = ModSemMaths.unit_vector_along_bond(coords, (c, b))
        len_ab = bond_lens[a, b]
        vals_ab = eigenvals[a, b, :]
        vecs_ab = eigenvecs[:3, :3, a, b]
        len_bc = bond_lens[b, c]
        vals_cb = eigenvals[c, b, :]
        vecs_cb = eigenvecs[:3, :3, c, b]
        # Normal vector to the angle plane.
        u_n = ModSemMaths.unit_vector_normal_to_bond(u_cb, u_ab)
        # Linear angle (bond vectors parallel or anti-parallel): the
        # perpendicular vector is undefined, fall back to sampling.
        if abs(np.linalg.norm(u_cb - u_ab)) < 0.01 or (1.99 < abs(np.linalg.norm(u_cb - u_ab)) < 2.01):
            # Scalings are set to 1 in the special case.
            k_theta, theta_0 = ModSemMaths.f_c_a_special_case(
                u_ab, u_cb, [len_ab, len_bc], [vals_ab, vals_cb], [vecs_ab, vecs_cb])
        else:
            u_pa = ModSemMaths.unit_vector_normal_to_bond(u_n, u_ab)
            u_pc = ModSemMaths.unit_vector_normal_to_bond(u_cb, u_n)
            # Scaling due to additional angles - Modified Seminario part.
            sum_first = sum(vals_ab[k] * abs(ModSemMaths.dot_product(u_pa, vecs_ab[:, k])) for k in range(3)) / scalings[0]
            sum_second = sum(vals_cb[k] * abs(ModSemMaths.dot_product(u_pc, vecs_cb[:, k])) for k in range(3)) / scalings[1]
            # Added as two springs in series.
            k_theta = (1 / ((len_ab ** 2) * sum_first)) + (1 / ((len_bc ** 2) * sum_second))
            k_theta = 1 / k_theta
            # Change to OPLS form.
            k_theta = abs(k_theta * 0.5)
            # Equilibrium angle.
            theta_0 = np.degrees(np.arccos(np.dot(u_ab, u_cb)))
        return k_theta, theta_0

    @staticmethod
    def f_c_a_special_case(u_ab, u_cb, bond_lens, eigenvals, eigenvecs):
        """
        Force constant angle special case, for example nitrile groups.

        For a linear bond the perpendicular vector is not defined, so the
        force constant is averaged over sampled orientations around the bond.
        """
        n_samples = 200
        samples = np.zeros(n_samples)
        for idx in range(n_samples):
            u_n = [np.sin(idx) * np.cos(idx), np.sin(idx) * np.sin(idx), np.cos(idx)]
            u_pa = ModSemMaths.unit_vector_normal_to_bond(u_n, u_ab)
            u_pc = ModSemMaths.unit_vector_normal_to_bond(u_cb, u_n)
            sum_first = sum(eigenvals[0][k] * abs(ModSemMaths.dot_product(u_pa, eigenvecs[0][:, k])) for k in range(3))
            sum_second = sum(eigenvals[1][k] * abs(ModSemMaths.dot_product(u_pc, eigenvecs[1][:, k])) for k in range(3))
            k_sample = (1 / ((bond_lens[0] ** 2) * sum_first)) + (1 / ((bond_lens[1] ** 2) * sum_second))
            k_sample = 1 / k_sample
            samples[idx] = abs(k_sample * 0.5)
        k_theta = np.average(samples)
        theta_0 = np.degrees(np.arccos(np.dot(u_ab, u_cb)))
        return k_theta, theta_0
@for_all_methods(timer_logger)
class ModSeminario:
def __init__(self, molecule):
    """Store the molecule and prepare an empty pairwise bond-length matrix.

    The matrix is filled later by modified_seminario_method().
    """
    self.molecule = molecule
    n_atoms = len(self.molecule.atoms)
    self.size_mol = n_atoms
    self.bond_lens = np.zeros((n_atoms, n_atoms))
    # Optimised QM coordinates are the geometry used throughout.
    self.coords = self.molecule.coords['qm']
def __repr__(self):
    """Debug representation showing the full instance state."""
    return "{}({!r})".format(type(self).__name__, self.__dict__)
def modified_seminario_method(self):
    """
    Calculate the new bond and angle terms after being passed the symmetric
    Hessian and optimised molecule coordinates.
    """
    n_atoms = self.size_mol
    eigenvecs = np.empty((3, 3, n_atoms, n_atoms), dtype=complex)
    eigenvals = np.empty((n_atoms, n_atoms, 3), dtype=complex)
    for a in range(n_atoms):
        for b in range(n_atoms):
            # Interatomic distance between atoms a and b.
            self.bond_lens[a, b] = np.linalg.norm(self.coords[a, :] - self.coords[b, :])
            # Eigen-decomposition of the 3x3 Hessian sub-block of the pair.
            sub_hessian = self.molecule.hessian[(a * 3):((a + 1) * 3), (b * 3):((b + 1) * 3)]
            eigenvals[a, b, :], eigenvecs[:, :, a, b] = np.linalg.eig(sub_hessian)
    # The bond and angle values are calculated and written to file.
    self.calculate_bonds(eigenvals, eigenvecs)
    self.calculate_angles(eigenvals, eigenvecs)
def calculate_angles(self, eigenvals, eigenvecs):
    """
    Uses the modified Seminario method to find the angle parameters and prints them to file.

    Parameters
    ----------
    eigenvals : complex array (n_atoms, n_atoms, 3) of Hessian sub-block eigenvalues.
    eigenvecs : complex array (3, 3, n_atoms, n_atoms) of matching eigenvectors.

    Side effects: writes 'Modified_Seminario_Angles.txt' and fills
    self.molecule.HarmonicAngleForce with [theta_0 (rad), k_theta (kJ)] entries.
    """
    # A structure is created with the index giving the central atom of the angle;
    # an array then lists the angles with that central atom.
    # e.g. central_atoms_angles[3] contains an array of angles with central atom 3.
    # Connectivity information for Modified Seminario Method
    central_atoms_angles = []
    for coord in range(self.size_mol):
        central_atoms_angles.append([])
        for count, angle in enumerate(self.molecule.angles):
            if coord == angle[1]:
                # For angle abc, atoms a, c are written to array
                central_atoms_angles[coord].append([angle[0], angle[2], count])
                # For angle abc, atoms c a are written to array
                central_atoms_angles[coord].append([angle[2], angle[0], count])
    # Sort rows by atom number
    for coord in range(self.size_mol):
        central_atoms_angles[coord] = sorted(central_atoms_angles[coord], key=itemgetter(0))
    # Find normals u_pa for each angle
    unit_pa_all_angles = []
    for i in range(len(central_atoms_angles)):
        unit_pa_all_angles.append([])
        for j in range(len(central_atoms_angles[i])):
            # For the angle at central_atoms_angles[i][j,:] the u_pa value is found for plane abc and bond ab,
            # where abc corresponds to the order of the arguments. This is why the reverse order was also added.
            angle = central_atoms_angles[i][j][0], i, central_atoms_angles[i][j][1]
            unit_pa_all_angles[i].append(ModSemMaths.u_pa_from_angles(angle, self.coords))
    # Finds the contributing factors from the other angle terms
    scaling_factor_all_angles = []
    for i in range(len(central_atoms_angles)):
        scaling_factor_all_angles.append([])
        for j in range(len(central_atoms_angles[i])):
            n = m = 1
            angles_around = extra_contribs = 0
            scaling_factor_all_angles[i].append([0, 0])
            # Position in angle list
            scaling_factor_all_angles[i][j][1] = central_atoms_angles[i][j][2]
            # Goes through the list of angles with the same central atom, then computes the term needed for MSM.
            # Forwards direction, finds the same bonds with the central atom i
            while ((j + n) < len(central_atoms_angles[i])) and central_atoms_angles[i][j][0] == central_atoms_angles[i][j + n][0]:
                extra_contribs += (abs(np.dot(unit_pa_all_angles[i][j][:], unit_pa_all_angles[i][j + n][:]))) ** 2
                n += 1
                angles_around += 1
            # Backwards direction, finds the same bonds with the central atom i
            while ((j - m) >= 0) and central_atoms_angles[i][j][0] == central_atoms_angles[i][j - m][0]:
                extra_contribs += (abs(np.dot(unit_pa_all_angles[i][j][:], unit_pa_all_angles[i][j - m][:]))) ** 2
                m += 1
                angles_around += 1
            scaling_factor_all_angles[i][j][0] = 1
            if n != 1 or m != 1:
                # Finds the mean value of the additional contribution
                scaling_factor_all_angles[i][j][0] += (extra_contribs / (m + n - 2))
    # BUG FIX: this was `[[]] * len(...)`, which repeats ONE shared list object
    # so every append below landed in *every* slot and each angle saw the same
    # (wrong) scaling factors. Build independent lists instead.
    scaling_factors_angles_list = [[] for _ in range(len(self.molecule.angles))]
    # Orders the scaling factors according to the angle list
    for i in range(len(central_atoms_angles)):
        for j in range(len(central_atoms_angles[i])):
            scaling_factors_angles_list[scaling_factor_all_angles[i][j][1]].append(scaling_factor_all_angles[i][j][0])
    k_theta, theta_0 = np.zeros(len(self.molecule.angles)), np.zeros(len(self.molecule.angles))
    conversion = constants.KCAL_TO_KJ * 2
    with open('Modified_Seminario_Angles.txt', f'{"w" if self.molecule.restart else "a+"}') as angle_file:
        for i, angle in enumerate(self.molecule.angles):
            scalings = scaling_factors_angles_list[i][:2]
            # Average both orderings so the result does not depend on direction.
            ab_k_theta, ab_theta_0 = ModSemMaths.force_constant_angle(angle, self.bond_lens, eigenvals, eigenvecs, self.coords, scalings)
            ba_k_theta, ba_theta_0 = ModSemMaths.force_constant_angle(angle[::-1], self.bond_lens, eigenvals, eigenvecs, self.coords, scalings[::-1])
            # Vib_scaling takes into account DFT deficiencies / anharmonicity.
            k_theta[i] = ((ab_k_theta + ba_k_theta) / 2) * (self.molecule.vib_scaling ** 2)
            theta_0[i] = (ab_theta_0 + ba_theta_0) / 2
            angle_file.write(f'{self.molecule.atoms[angle[0]].atom_name}-{self.molecule.atoms[angle[1]].atom_name}-{self.molecule.atoms[angle[2]].atom_name} ')
            angle_file.write(f'{k_theta[i]:.3f} {theta_0[i]:.3f} {angle[0]} {angle[1]} {angle[2]}\n')
            # Add ModSem values to ligand object.
            self.molecule.HarmonicAngleForce[angle] = [theta_0[i] * constants.DEG_TO_RAD, k_theta[i] * conversion]
def calculate_bonds(self, eigenvals, eigenvecs):
"""
Uses the modified Seminario method to find the bond parameters and print them to file.
"""
bonds = self.molecule.topology.edges
conversion = constants.KCAL_TO_KJ * 200
k_b, bond_len_list = np.zeros(len(bonds)), np.zeros(len(bonds))
with open('Modified_Seminario_Bonds.txt', f'{"w" if self.molecule.restart else "a+"}') as bond_file:
for pos, bond in enumerate(bonds):
ab = ModSemMaths.force_constant_bond(bond, eigenvals, eigenvecs, self.coords)
ba = ModSemMaths.force_constant_bond(bond[::-1], eigenvals, eigenvecs, self.coords)
# Order of bonds sometimes causes slight differences; find the mean and apply vib_scaling.
k_b[pos] = np.real((ab + ba) / 2) * (self.molecule.vib_scaling ** 2)
bond_len_list[pos] = self.bond_lens[bond]
bond_file.write(f'{self.molecule.atoms[bond[0]].atom_name}-{self.molecule.atoms[bond[1]].atom_name} ')
bond_file.write(f'{k_b[pos]:.3f} {bond_len_list[pos]:.3f} {bond[0]} {bond[1]}\n')
# Add ModSem values | |
p = Process(target=subprocess.Popen, args=(command,), kwargs=dict(shell=True))
p.start()
def build_toy_socket_server_c():
    """Build the socket server with the toy socket evaluator in C"""
    # Disable every external evaluator so only the toy evaluator is compiled in.
    for evaluator in rw_evaluators:
        _set_external_evaluator(evaluator, 0)
    _build_socket_server_c()
def build_toy_socket_server_python():
    """Prepare the socket server with the toy socket evaluator in Python"""
    # Disable every external evaluator so only the toy evaluator remains active.
    for evaluator in rw_evaluators:
        _set_external_evaluator(evaluator, 0)
def _build_rw_top_trumps_lib():
    """Build the top trumps library with the top trumps evaluator (in C)"""
    lib_name = 'rw_top_trumps'
    src_dir = os.path.join('code-experiments', 'rw-problems', 'top_trumps')
    # Rebuild the library from a clean state.
    make(src_dir, 'clean', verbose=_build_verbosity)
    make(src_dir, 'all', verbose=_build_verbosity)
    # Platform-specific shared-library file name.
    if 'win32' in sys.platform:
        lib_name += '.dll'
    else:
        lib_name = 'lib' + lib_name + '.so'
    try:
        # Copy the library so that the socket server finds it
        copy_file(os.path.join(src_dir, lib_name),
                  os.path.join('code-experiments', 'rw-problems', lib_name))
    except PermissionError:
        # The rw-top-trumps library is probably already used by some running server
        print('WARNING! The rw-top-trumps library was not copied due to a permission error')
    if 'darwin' in sys.platform:
        library_des = '/usr/local/lib/' + lib_name
        if not os.path.lexists(library_des):
            # Create a symlink to the library to be used at run-time
            library_src = os.path.abspath(os.path.join(src_dir, lib_name))
            os.symlink(library_src, library_des)
def build_rw_top_trumps_server(force_download=False, exclusive_evaluator=True):
    """Download and build the socket server with the top trumps evaluator (in C)"""
    # Fetch the evaluator data from GitHub.
    url_name = 'https://github.com/ttusar/top-trumps/archive/master.zip'
    _download_external_evaluator('top_trumps', url_name, force_download=force_download)
    # Build the library
    _build_rw_top_trumps_lib()
    if exclusive_evaluator:
        # Rebuild the C server with only the rw_top_trumps evaluator enabled.
        for evaluator in rw_evaluators:
            _set_external_evaluator(evaluator, 1 if evaluator == rw_evaluator_top_trumps else 0)
        _build_socket_server_c()
def build_rw_mario_gan_server(force_download=False, exclusive_evaluator=True):
    """Download data and prepare the socket server to use the mario gan evaluator in Python"""
    # Fetch the evaluator data from GitHub.
    url_name = 'https://github.com/TheHedgeify/mario-gan/archive/master.zip'
    _download_external_evaluator('mario_gan', url_name, force_download=force_download)
    if exclusive_evaluator:
        # Configure the Python server so only the rw_gan_mario evaluator is used.
        for evaluator in rw_evaluators:
            _set_external_evaluator(evaluator, 1 if evaluator == rw_evaluator_mario_gan else 0)
def build_socket_servers(force_download=False):
    """Build the socket server with all available evaluators in C and Python"""
    # Enable every evaluator, then build both server variants on top of that.
    for evaluator in rw_evaluators:
        _set_external_evaluator(evaluator, 1)
    build_rw_top_trumps_server(force_download=force_download, exclusive_evaluator=False)
    build_rw_mario_gan_server(force_download=force_download, exclusive_evaluator=False)
def run_toy_socket_server_c(port, do_build=False):
    """Build and run the socket server with the toy socket evaluator in C"""
    if do_build:
        # Rebuild the C server with only the toy evaluator enabled.
        build_toy_socket_server_c()
    # Launch the (already built) C server on the requested port.
    _run_socket_server_c(port)
def run_toy_socket_server_python(port, do_build=False):
    """Build and run the socket server with the toy socket evaluator in Python"""
    if do_build:
        # Re-prepare the Python server with only the toy evaluator enabled.
        build_toy_socket_server_python()
    # Launch the Python server on the requested port.
    _run_socket_server_python(port)
def run_rw_top_trumps_server(port, force_download=False, do_build=False):
    """Build (if do_build) and run the socket server with the top trumps evaluator (in C)"""
    if do_build:
        # Only build the server when required
        build_rw_top_trumps_server(force_download=force_download, exclusive_evaluator=True)
    # Launch the C server on the requested port.
    _run_socket_server_c(port)
def run_rw_mario_gan_server(port, force_download=False, do_build=False):
    """Prepare and run the socket server with the mario gan evaluator (in Python)"""
    if do_build:
        # Only prepare the server when required.
        build_rw_mario_gan_server(force_download=force_download, exclusive_evaluator=True)
    # Launch the Python server on the requested port.
    _run_socket_server_python(port)
def run_socket_servers(force_download=False, do_build=False):
    """Run socket servers in C and Python"""
    if do_build:
        build_socket_servers(force_download=force_download)
    # Launch both server variants on their default ports.
    _run_socket_server_c(socket_server_port_c)
    _run_socket_server_python(socket_server_port_python)
def _stop_socket_server(port):
    """Stop the socket server running on the given port.

    Connects to the server on `socket_server_host` and sends the SHUTDOWN
    command. When nothing is listening on the port, a note is printed and the
    function simply returns; any other failure prints an error and exits.
    """
    try:
        # The with-statement closes the socket on every path -- the original
        # leaked the socket when the connect failed (early return w/o close).
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            try:
                s.connect((socket_server_host, port))  # Connect to the server
            except socket.error:
                print('No socket server found on port {}'.format(port))
                return
            s.send('SHUTDOWN'.encode())  # Send request for shutdown
        print('Stopped socket sever on port {}'.format(port))
    except Exception as e:
        # `except (socket.error, Exception)` in the original was redundant:
        # socket.error is an alias of OSError, itself an Exception subclass.
        print('Error stopping socket server on port {}: {}'.format(port, e))
        sys.exit(-1)
def stop_socket_servers(port):
    """Stop the socket servers running on the known ports (in case no port is
    given) or the given port"""
    if port:
        targets = [int(port)]
    else:
        targets = socket_server_ports
    for target in targets:
        _stop_socket_server(target)
def _get_socket_port(suite_name, start_port, current_batch):
    """Returns the used port based on the given parameters

    The same function is used in rw_example_experiment.py. If this one changes, the other has to
    change too.
    """
    # Python-based suites live in a separate port range, offset by 200.
    port_py_inc = 200
    if 'toy-socket' in suite_name or 'rw-top-trumps' in suite_name:
        return start_port + current_batch
    if 'rw-mario-gan' in suite_name:
        return start_port + port_py_inc + current_batch
    raise ValueError('Suite {} not supported'.format(suite_name))
def build_rw_experiment(package_install_option=[], force_download=False, args=[]):
    """Builds the real-world experiment for the given suite that uses sockets for evaluation
    ('toy-socket' by default).

    This does not also run the experiment, see the run_rw_experiment function.
    """
    # Default suite; must match the default in rw_example_experiment.py.
    suite_name = 'toy-socket'
    # Parse the arguments
    for arg in args:
        if arg.startswith('suite='):
            suite_name = arg[6:]
    # Build the socket server matching the chosen suite.
    if 'toy-socket' in suite_name:
        build_toy_socket_server_c()
    elif 'rw-top-trumps' in suite_name:
        build_rw_top_trumps_server(force_download=force_download)
    elif 'rw-mario-gan' in suite_name:
        build_rw_mario_gan_server(force_download=force_download)
    else:
        raise ValueError('Suite {} not supported'.format(suite_name))
    # Build Python
    build_python(package_install_option=package_install_option)
def run_rw_experiment(package_install_option=[], force_download=False, do_build=True, args=[]):
    """Runs the real-world experiment with the given suite that uses sockets for evaluation
    ('toy-socket' by default).

    Builds the experiment only if so given by the arguments.
    First runs the socket server, then the real-world example experiment in Python and finally stops
    the socket server.
    """
    if do_build:
        build_rw_experiment(package_install_option=package_install_option,
                            force_download=force_download, args=args)
    # These defaults should match those from rw_example_experiment.py
    suite_name = 'toy-socket'
    current_batch = 1
    port = start_port = 7000
    try:
        # Parse the arguments (simple 'key=value' strings)
        for arg in args:
            if arg[:11] == 'start_port=':
                start_port = int(arg[11:])
            elif arg[:6] == 'batch=':
                current_batch = int(arg[6:])
            elif arg[:6] == 'suite=':
                suite_name = arg[6:]
        # Get the right port for this suite
        port = _get_socket_port(suite_name, start_port, current_batch)
        # Run the right socket server for this suite
        if 'toy-socket' in suite_name:
            run_toy_socket_server_c(port=port, do_build=False)
        elif 'rw-top-trumps' in suite_name:
            run_rw_top_trumps_server(port=port, do_build=False)
        elif 'rw-mario-gan' in suite_name:
            run_rw_mario_gan_server(port=port, do_build=False)
        else:
            raise ValueError('Suite {} not supported'.format(suite_name))
        time.sleep(3)  # Wait a few seconds for the servers to start
        # Run the real-world example experiment with the given arguments
        python(os.path.join('code-experiments', 'build', 'python'),
               ['rw_example_experiment.py'] + args)
    finally:
        # Stop the socket servers even when the experiment itself failed
        stop_socket_servers(port=port)
################################################################################
## Post processing
def test_postprocessing(all_tests=False, package_install_option=[]):
    """Run the cocopp post-processing tests.

    With ``all_tests`` a fresh example experiment is executed first (to
    produce recent data to post-process) and the full test set is run;
    otherwise only the short test set plus the aRTA-plot doctests run.
    Experiment output folders are removed again in all cases.
    """
    install_postprocessing(package_install_option=package_install_option)
    try:
        if all_tests:
            # run example experiment to have a recent data set to postprocess:
            build_python(package_install_option=package_install_option)
            # The snippet below runs random search on all known suites.
            python('code-experiments/build/python/', ['-c', '''
from __future__ import print_function
try:
    import example_experiment as ee
except Exception as e:
    print(e)
ee.SOLVER = ee.random_search  # which is default anyway
for ee.suite_name, ee.observer_options['result_folder'] in [
        ["bbob-biobj", "RS-bi"],  # use a short path for Jenkins
        ["bbob", "RS-bb"],
        ["bbob-constrained", "RS-co"],
        ["bbob-largescale", "RS-la"],
        ["bbob-mixint", "RS-mi"],
        ["bbob-biobj-mixint", "RS-bi-mi"]
    ]:
    print("  suite %s" % ee.suite_name, end=' ')  # these prints are swallowed
    if ee.suite_name in ee.cocoex.known_suite_names:
        print("testing into folder %s" % ee.observer_options['result_folder'])
        ee.main()
    else:
        print("is not known")
                '''], verbose=_verbosity)
            # now run all tests
            python('code-postprocessing/cocopp',
                   ['test.py', 'all', sys.executable], verbose=_verbosity)
        else:
            python('code-postprocessing/cocopp', ['test.py', sys.executable],
                   verbose=_verbosity)
            # also run the doctests in aRTAplots/generate_aRTA_plot.py:
            python('code-postprocessing/aRTAplots', ['generate_aRTA_plot.py'], verbose=_verbosity)
    except subprocess.CalledProcessError:
        sys.exit(-1)
    finally:
        # always remove folder of previously run experiments:
        for s in ['bi', 'bb', 'co', 'la', 'mi', 'bi-mi']:
            shutil.rmtree('code-experiments/build/python/exdata/RS-' + s,
                          ignore_errors=True)
def verify_postprocessing(package_install_option=[]):
    """Install cocopp and prepare its HTML pages for verification."""
    install_postprocessing(package_install_option=package_install_option)
    # This is not affected by the _verbosity value. Verbose should always be True.
    python('code-postprocessing/cocopp', ['preparehtml.py', '-v'], verbose=True)
################################################################################
## Pre-processing
def install_preprocessing(package_install_option=[]):
    """Install the COCO pre-processing tools (archive-update package).

    Expands the setup template, rebuilds the Python bindings, amalgamates the
    C core into the preprocessing interface and installs the package.
    """
    # NOTE(review): RELEASE is only read here, so the `global` declaration is
    # not strictly necessary -- kept as-is for clarity.
    global RELEASE
    install_postprocessing(package_install_option=package_install_option)
    expand_file(join('code-preprocessing/archive-update', 'setup.py.in'),
                join('code-preprocessing/archive-update', 'setup.py'),
                {'COCO_VERSION': git_version(pep440=True)})
    build_python(package_install_option=package_install_option)
    # Bundle the C core sources into a single coco.c/coco.h pair.
    amalgamate(CORE_FILES + ['code-experiments/src/coco_runtime_c.c'],
               'code-preprocessing/archive-update/interface/coco.c', RELEASE,
               {"COCO_VERSION": git_version(pep440=True)})
    expand_file('code-experiments/src/coco.h', 'code-preprocessing/archive-update/interface/coco.h',
                {'COCO_VERSION': git_version(pep440=True)})
    python('code-preprocessing/archive-update',
           ['setup.py', 'install'] + package_install_option,
           verbose=_verbosity, custom_exception_handler=install_error)
def test_preprocessing(package_install_option=[]):
    """Install the preprocessing tools and run their pytest suites."""
    install_preprocessing(package_install_option=package_install_option)
    # Run the unit tests of both preprocessing packages.
    for folder in ('code-preprocessing/archive-update',
                   'code-preprocessing/log-reconstruction'):
        python(folder, ['-m', 'pytest'], verbose=_verbosity)
################################################################################
## Global
def build(package_install_option=[]):
    """Build the C, Python and Java interfaces.

    Each builder runs independently; a failure is reported with a hint to run
    the corresponding `./do.py build_*` command for a detailed error report.
    """
    # BUG FIX: build_python used to be *called* while constructing this list,
    # which executed it immediately and stored its return value (None) in the
    # list; `builder()` then raised TypeError, swallowed by a bare `except:`.
    # Wrap it so it is invoked lazily like the other builders. The wrapper's
    # name contains 'build_python' so the error message below stays correct.
    def _build_python():
        return build_python(package_install_option=package_install_option)

    builders = [
        build_c,
        # build_matlab,
        _build_python,
        build_java,
    ]
    for builder in builders:
        try:
            builder()
        except Exception:  # was a bare `except:`, which also ate KeyboardInterrupt
            failed = str(builder)
            print("============")
            print(' ERROR: %s failed, call "./do.py %s" individually'
                  % (failed, failed[failed.find('build_'):].split()[0]) +
                  ' for a more detailed error report')
            print("============")
def run_all(package_install_option=[]):
    """Run the example experiments of the C, Java and Python bindings."""
    run_c()
    run_java()
    run_python(package_install_option=package_install_option)
def test():
    """Run the unit tests of the C, Java and Python bindings."""
    test_c()
    test_java()
    test_python()
def verbose(args):
    """Calls main(args) in verbose mode for additional output"""
    global _verbosity
    _verbosity = True
    try:
        main(args)
    finally:
        # Restore the default even when main() raises -- the original left
        # _verbosity stuck at True on any exception.
        _verbosity = False
def quiet(args):
    """Calls main(args) in quiet mode for less output during c builds"""
    global _build_verbosity
    _build_verbosity = False
    try:
        main(args)
    finally:
        # Restore the default even when main() raises -- the original left
        # _build_verbosity stuck at False on any exception.
        _build_verbosity = True
def silent(args):
    """calls `main(args)` with redirected output to keep the console clean

    Output is replayed (and the captured exception re-raised) only when an
    error is detected in the captured text.
    """
    # redirect stdout and call main
    filename = '_check_output'
    raised = None
    stdout = sys.stdout
    with open(filename, 'w') as out:
        sys.stdout = out
        try:
            main(args)
        except BaseException as exc:
            # BUG FIX: `except BaseException as raised: pass` deleted the name
            # `raised` when the handler exited (PEP 3110), so the later
            # `if raised:` crashed with NameError after any caught exception.
            # Bind the exception to a separate, surviving name instead.
            raised = exc
        finally:
            # Restore stdout on every path.
            sys.stdout = stdout
    # check whether an error occured
    error = False
    with open(filename, 'r') as check:  # `with` closes the handle (was leaked)
        for line in check:
            if line.startswith('ERR') or not line[0] in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
                error = True
                break
    if error:
        with open(filename, 'r') as check:
            for line in check:
                print(line, end="")
    if raised:
        raise raised
def help():
print("""COCO framework bootstrap tool. Version %s
Usage: do.py <command> <arguments>
If you want to get going as quickly as | |
import coopihc
from coopihc.space import StateElement, State, StateNotContainedError
import gym
import numpy
import sys
import copy
# Select which demo section to run from the first command-line argument
# ("correct", "non-list", "array", ..., or "all" for every section).
_str = sys.argv[1]
# -------- Correct assignment
if _str == "correct" or _str == "all":
    # One value per space, everything wrapped in lists of equal length.
    x = StateElement(
        values=[
            numpy.array([1]).reshape(
                1,
            ),
            2,
            3,
        ],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(3),
            coopihc.space.Discrete(6),
        ],
        possible_values=[[None], [None], [-6, -5, -4, -3, -2, -1]],
    )
if _str == "non-list":
    # Scalar (non-list) arguments; StateElement should coerce them.
    x = StateElement(
        values=numpy.array([-0.5]).reshape(
            1,
        ),
        spaces=coopihc.space.Box(-1, 1, shape=(1,)),
        possible_values=None,
    )
    y = StateElement(
        values=0, spaces=coopihc.space.Discrete(3), possible_values=[1, 2, 3]
    )
if _str == "array":
    # A multi-component numpy value inside a single Box space.
    x = StateElement(
        values=[numpy.array([0, 0])],
        spaces=[coopihc.space.Box(-1, 1, shape=(2,))],
        possible_values=[None],
    )
# --------- non rigorous assignment
if _str == "non-rigorous" or _str == "all":
    # Values passed as bare scalars instead of nested lists.
    x = StateElement(
        values=[1, 2, 3],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(3),
            coopihc.space.Discrete(6),
        ],
        possible_values=[[None], [None], [-6, -5, -4, -3, -2, -1]],
    )
    y = StateElement(
        values=[1, 2, 3],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(3),
            coopihc.space.Discrete(6),
        ],
        possible_values=None,
    )
# ---------- nested values
if _str == "nested" or _str == "all":
    # Each value wrapped one level deeper than strictly needed.
    x = StateElement(
        values=[[1], [2], [3]],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(3),
            coopihc.space.Discrete(6),
        ],
        possible_values=[[None], [None], [-6, -5, -4, -3, -2, -1]],
    )
# -------------- accessing values
if _str == "access" or _str == "all":
    x = StateElement(
        values=[[1], [2], [3]],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(3),
            coopihc.space.Discrete(6),
        ],
        possible_values=[[None], [None], [-6, -5, -4, -3, -2, -1]],
    )
    # Dictionary-style and attribute-style reads, then an item write.
    x["values"]
    x["spaces"]
    x["possible_values"]
    x.values
    x["values"] = [1, 1, 1]
# ------ normal reset
if _str == "reset" or _str == "all":
    x = StateElement(
        values=[
            numpy.array([1]).reshape(
                1,
            ),
            2,
            3,
        ],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(3),
            coopihc.space.Discrete(6),
        ],
        possible_values=[[None], [None], [-6, -5, -4, -3, -2, -1]],
    )
    # Random reset: draws new values from the attached spaces.
    x.reset()
# -------- forced reset
if _str == "forced-reset" or _str == "all":
    x = StateElement(
        values=[
            numpy.array([1]).reshape(
                1,
            ),
            2,
            3,
        ],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(3),
            coopihc.space.Discrete(6),
        ],
        possible_values=[[None], [None], [-6, -5, -4, -3, -2, -1]],
    )
    # Deterministic reset: force the listed values instead of sampling.
    reset_dic = {"values": [-1 / 2, -0, 0]}
    x.reset(dic=reset_dic)
if _str == "gethv" or _str == "all":
    x = StateElement(
        values=[
            numpy.array([1]).reshape(
                1,
            ),
            2,
            3,
        ],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(3),
            coopihc.space.Discrete(6),
        ],
        possible_values=[[None], [None], [-6, -5, -4, -3, -2, -1]],
    )
    # Map raw values to human-readable ones via possible_values.
    x.get_human_values()
    s = State()
    s["substate"] = x
    s["substate"]["human_values"]
    y = StateElement(
        values=[None, None, None],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(3),
            coopihc.space.Discrete(6),
        ],
        possible_values=[[None], [None], [-6, -5, -4, -3, -2, -1]],
    )
    q = State()
    q["substate"] = y
    q["substate"]["human_values"]
    u = StateElement(
        values=[2], spaces=[coopihc.space.Discrete(5)], possible_values=[None]
    )
    print(u["human_values"][0])
if _str == "nones" or _str == "all":
    # Degenerate constructions with missing values/arguments.
    x = StateElement(
        values=None, spaces=[coopihc.space.Discrete(2)], possible_values=None
    )
    y = StateElement()
    z = StateElement(
        values=[None, None],
        spaces=[
            coopihc.space.Box(low=-numpy.inf, high=numpy.inf, shape=(1,))
            for i in range(2)
        ],
        possible_values=None,
    )
    z["values"] = None
if _str == "iter" or _str == "all":
    x = StateElement(
        values=[
            numpy.array([1]).reshape(
                1,
            ),
            2,
            3,
        ],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(3),
            coopihc.space.Discrete(6),
        ],
        possible_values=[[None], [None], [-6, -5, -4, -3, -2, -1]],
    )
    # A StateElement iterates over its per-space components.
    for _x in x:
        print(_x)
if _str == "cartesianproduct" or _str == "all":
    x = StateElement(
        values=[
            numpy.array([1]).reshape(
                1,
            ),
            2,
            3,
        ],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(3),
            coopihc.space.Discrete(6),
        ],
        possible_values=[[None], [None], [-6, -5, -4, -3, -2, -1]],
    )
    # Enumerate every combination of the discrete components.
    for _x in x.cartesian_product():
        print(_x)
if _str == "repr" or _str == "all":
    # Build a two-level State hierarchy to exercise __repr__/__str__.
    x = StateElement(
        values=[
            numpy.array([1]).reshape(
                1,
            ),
            2,
            3,
        ],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(3),
            coopihc.space.Discrete(15),
        ],
        possible_values=[[None], [None], [-6 + i for i in range(15)]],
    )
    y = StateElement(
        values=[
            numpy.array([1]).reshape(
                1,
            ),
            2,
            3,
        ],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(3),
            coopihc.space.Discrete(6),
        ],
        possible_values=[[None], [None], [-6, -5, -4, -3, -2, -1]],
    )
    z = StateElement(
        values=[
            numpy.array(
                [[1 / 3, 1 / 3, 1 / 3], [3 / 5, 3 / 5, 3 / 5], [1 / 7, 1 / 8, 1 / 9]]
            )
        ],
        spaces=[coopihc.space.Box(-1, 1, shape=(3, 3))],
        possible_values=[[None]],
    )
    s = State()
    s["element1"] = x
    s["element2"] = y
    s["element3"] = z
    xx = StateElement(
        values=[
            numpy.array([0, 0]).reshape(
                2,
            ),
            2,
            3,
        ],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(2,)),
            coopihc.space.Discrete(3),
            coopihc.space.Discrete(6),
        ],
        possible_values=[[None], [None], [-6, -5, -4, -3, -2, -1]],
    )
    yy = StateElement(
        values=[numpy.array([[0, 0], [1, 1]])],
        spaces=[coopihc.space.Box(-1, 1, shape=(2, 2))],
        possible_values=[[None]],
    )
    s2 = State()
    s2["element1"] = yy
    s2["element2"] = xx
    S = State()
    S["substate1"] = s
    S["substate2"] = s2
if _str == "flat" or _str == "all":
    # Build a nested State and flatten it.
    x = StateElement(
        values=[
            numpy.array([1]).reshape(
                1,
            ),
            1,
            1,
        ],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(3),
            coopihc.space.Discrete(6),
        ],
        possible_values=[[None], [None], [-6, -5, -4, -3, -2, -1]],
    )
    y = StateElement(
        values=[
            numpy.array([1]).reshape(
                1,
            ),
            2,
            2,
        ],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(3),
            coopihc.space.Discrete(6),
        ],
        possible_values=[[None], [None], [-6, -5, -4, -3, -2, -1]],
    )
    s = State()
    s["element1"] = x
    s["element2"] = y
    xx = StateElement(
        values=[
            numpy.array([1]).reshape(
                1,
            ),
            3,
            3,
        ],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(4),
            coopihc.space.Discrete(6),
        ],
        possible_values=[[None], [None], [-6, -5, -4, -3, -2, -1]],
    )
    s2 = State()
    s2["element1"] = xx
    S = State()
    S["substate1"] = s
    S["substate2"] = s2
    S.flat()
if _str == "len" or _str == "all":
    x = StateElement(
        values=[
            numpy.array([1]).reshape(
                1,
            ),
            2,
            3,
        ],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(3),
            coopihc.space.Discrete(6),
        ],
        possible_values=[[None], [None], [-6, -5, -4, -3, -2, -1]],
    )
    # len() of a StateElement is its number of components.
    len(x)
if _str == "filter" or _str == "all":
    x = StateElement(
        values=[
            numpy.array([1]).reshape(
                1,
            ),
            1,
            1,
        ],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(3),
            coopihc.space.Discrete(6),
        ],
        possible_values=[[None], [None], [-6, -5, -4, -3, -2, -1]],
    )
    y = StateElement(
        values=[
            numpy.array([1]).reshape(
                1,
            ),
            2,
            2,
        ],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(3),
            coopihc.space.Discrete(6),
        ],
        possible_values=[[None], [None], [-6, -5, -4, -3, -2, -1]],
    )
    s = State()
    s["e1"] = x
    s["e2"] = y
    xx = StateElement(
        values=[
            numpy.array([1]).reshape(
                1,
            ),
            3,
            3,
        ],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(4),
            coopihc.space.Discrete(6),
        ],
        possible_values=[[None], [None], [-6, -5, -4, -3, -2, -1]],
    )
    s2 = State()
    s2["element1"] = xx
    S = State()
    S["substate1"] = s
    S["substate2"] = s2
    from collections import OrderedDict
    # Filter with an explicit filterdict first, then over the full state.
    ordereddict = OrderedDict({"substate1": OrderedDict({"e1": 0, "e2": 0})})
    ns1 = S.filter("values", filterdict=ordereddict)
    ns2 = S.filter("spaces", filterdict=ordereddict)
    ns3 = S.filter("possible_values", filterdict=ordereddict)
    ns4 = S.filter("human_values", filterdict=ordereddict)
    ns5 = S.filter("values")
    ns6 = S.filter("spaces")
    ns7 = S.filter("possible_values")
    ns8 = S.filter("human_values")
if _str == "copy" or _str == "all":
    import copy
    x = StateElement(
        values=[
            numpy.array([1]).reshape(
                1,
            ),
            1,
            1,
        ],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(3),
            coopihc.space.Discrete(6),
        ],
        possible_values=[[None], [None], [-6, -5, -4, -3, -2, -1]],
    )
    y = StateElement(
        values=[
            numpy.array([1]).reshape(
                1,
            ),
            2,
            2,
        ],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(3),
            coopihc.space.Discrete(6),
        ],
        possible_values=[[None], [None], [-6, -5, -4, -3, -2, -1]],
    )
    s = State()
    s["e1"] = x
    s["e2"] = y
    xx = StateElement(
        values=[
            numpy.array([1]).reshape(
                1,
            ),
            3,
            3,
        ],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(4),
            coopihc.space.Discrete(6),
        ],
        possible_values=[[None], [None], [-6, -5, -4, -3, -2, -1]],
    )
    s2 = State()
    s2["element1"] = xx
    S = State()
    S["substate1"] = s
    S["substate2"] = s2
    # Shallow vs. deep copy; the original is mutated afterwards so the
    # two copies can be compared against it.
    _copy = copy.copy(S)
    _deepcopy = copy.deepcopy(S)
    S["substate1"]["e1"]["values"] = [0, 0, 0]
if _str == "neg" or _str == "all":
    x = StateElement(
        values=[
            numpy.array([1]).reshape(
                1,
            )
        ],
        spaces=[coopihc.space.Box(-1, 1, shape=(1,))],
        possible_values=[[None]],
        clipping_mode="warning",
    )
    # Unary negation of a StateElement.
    print(-x)
if _str == "add" or _str == "all":
    x = StateElement(
        values=[
            numpy.array([1]).reshape(
                1,
            ),
            1,
            1,
        ],
        spaces=[
            coopihc.space.Box(-1, 1, shape=(1,)),
            coopihc.space.Discrete(3),
            coopihc.space.Discrete(6),
        ],
        possible_values=[[None], [None], [-6, -5, -4, -3, -2, -1]],
        clipping_mode="warning",
    )
    # Addition with a plain list and with a scalar.
    y = [-1, -1, 2]
    z = -1
    print(x + y)
    print(x + z)
    a = StateElement(
        values=[numpy.array([0.2177283, 0.11400087])],
        spaces=[coopihc.space.Box(low=-numpy.inf, high=numpy.inf, shape=(2,))],
        possible_values=[None],
        clipping_mode="warning",
    )
    # Left and right addition/subtraction with a numpy array.
    b = numpy.array([0.12823329, -0.10512559])
    print(a + b)
    print(b + a)
    print(a - b)
    print(b - a)
    x = StateElement(
        values=[
            numpy.array([0.5]).reshape(
                1,
            )
        ],
        spaces=[coopihc.space.Box(-1, 1, shape=(1,))],
        possible_values=[[None]],
    )
    y = StateElement(
        values=[
            numpy.array([0.5]).reshape(
                1,
            )
        ],
        spaces=[coopihc.space.Box(-1, 1, shape=(1,))],
        possible_values=[[None]],
    )
    # StateElement + StateElement.
    print(x + y)
    print(x - y)
    d = x + y
    print(d)
    d = x - y
    print(d)
if _str == "mul" or _str == "all":
    a = StateElement(
        values=[numpy.array([0.2177283, 0.11400087])],
        spaces=[coopihc.space.Box(low=-100, high=100, shape=(2,))],
        possible_values=[None],
    )
    # Multiplication with an array and a scalar, on both sides.
    b = numpy.array([0.12823329, -0.10512559])
    c = 1
    print(a * b)
    print(a * c)
    print(b * a)
    print(c * a)
if _str == "cast" or _str == "all":
    # n to Box
    for i in range(6):
        print(i)
        x = StateElement(
            values=[i], spaces=[coopihc.space.Discrete(6)], possible_values=[[None]]
        )
        y = StateElement(
            values=[None],
            spaces=[coopihc.space.Box(-1, 1, shape=(1,))],
            possible_values=[None],
        )
        # Cast the discrete element into the box space and back.
        ret = x.cast(y)
        print(y)
        y = ret.cast(x)
        print(y)
x | |
pool members on each BIG-IP
monitor_states = \
hostbigip.pool.get_members_monitor_status(
name=pool['id'],
folder=pool['tenant_id'],
config_mode=self.conf.icontrol_config_mode
)
for member in service['members']:
if member['status'] in update_if_status:
# create the entry for this
# member in the return status
# dictionary set to ACTIVE
if not member['id'] in members:
members[member['id']] = \
{'status': plugin_const.INACTIVE}
# check if it down or up by monitor
# and update the status
for state in monitor_states:
# matched the pool member
# by address and port number
if member['address'] == \
strip_domain_address(
state['addr']) and \
int(member['protocol_port']) == \
int(state['port']):
# if the monitor says member is up
if state['state'] == \
'MONITOR_STATUS_UP' or \
state['state'] == \
'MONITOR_STATUS_UNCHECKED':
# set ACTIVE as long as the
# status was not set to 'DOWN'
# on another BIG-IP
if members[
member['id']]['status'] != \
'DOWN':
if member['admin_state_up']:
members[member['id']][
'status'] = \
plugin_const.ACTIVE
else:
members[member['id']][
'status'] = \
plugin_const.INACTIVE
else:
members[member['id']]['status'] = \
plugin_const.DOWN
stats['members'] = members
return stats
@serialized('remove_orphans')
def remove_orphans(self, all_loadbalancers):
    """Remove out-of-date configuration on big-ips """
    # Collect the tenants and loadbalancers that still exist; everything
    # else on the devices is considered orphaned.
    existing_tenants = [lb['tenant_id'] for lb in all_loadbalancers]
    existing_lbs = [lb['lb_id'] for lb in all_loadbalancers]
    # Purge in three passes so pools are removed on every device before
    # folder contents, and contents before the folders themselves.
    for device in self.get_all_bigips():
        device.pool.purge_orphaned_pools(existing_lbs)
    for device in self.get_all_bigips():
        device.system.purge_orphaned_folders_contents(existing_tenants)
    for device in self.get_all_bigips():
        device.system.purge_orphaned_folders(existing_tenants)
def fdb_add(self, fdb):
    """Add (L2toL3) forwarding database entries on every managed device."""
    # Strip the IPs first; without route-domain info we must not create
    # static arps for them.
    self.remove_ips_from_fdb_update(fdb)
    for device in self.get_all_bigips():
        self.network_builder.add_bigip_fdb(device, fdb)
def fdb_remove(self, fdb):
    """Remove (L2toL3) forwarding database entries on every managed device."""
    # Strip the IPs first; without route-domain info we must not touch
    # static arps for them.
    self.remove_ips_from_fdb_update(fdb)
    for device in self.get_all_bigips():
        self.network_builder.remove_bigip_fdb(device, fdb)
def fdb_update(self, fdb):
    """Update (L2toL3) forwarding database entries on every managed device."""
    # Strip the IPs first; without route-domain info we must not create
    # static arps for them.
    self.remove_ips_from_fdb_update(fdb)
    for device in self.get_all_bigips():
        self.network_builder.update_bigip_fdb(device, fdb)
# remove ips from fdb update so we do not try to
# add static arps for them because we do not have
# enough information to determine the route domain
def remove_ips_from_fdb_update(self, fdb):
    """Blank out the IP of every (mac, ip) entry in an fdb update, in place."""
    for network in fdb.values():
        for mac_ips in network['ports'].values():
            for mac_ip in mac_ips:
                mac_ip[1] = None
def tunnel_update(self, **kwargs):
    # Tunnel Update from Neutron Core RPC
    # Intentionally a no-op: this driver does not react to tunnel-update
    # notifications; tunnel state is reconciled via tunnel_sync() instead.
    pass
def tunnel_sync(self):
    """Report local tunnel endpoints; no-op unless gre/vxlan is enabled."""
    # Only sync when supported tunnel types are present.
    if not any(tunnel_type in ('gre', 'vxlan')
               for tunnel_type in self.agent_configurations['tunnel_types']):
        return False
    endpoint_ips = [device.local_ip
                    for device in self.get_all_bigips()
                    if device.local_ip]
    self.network_builder.tunnel_sync(endpoint_ips)
    # Tunnel sync sent; False tells the caller no resync is needed.
    return False
@serialized('sync')
@is_connected
def sync(self, service):
    """Sync service defintion to device"""
    # plugin_rpc may not be set when unit testing
    if self.plugin_rpc:
        # Refresh the definition; it may have changed since being queued.
        service = self.plugin_rpc.get_service_by_loadbalancer_id(
            service['loadbalancer']['id'])
    if not service['loadbalancer']:
        LOG.debug("Attempted sync of deleted pool")
        return
    self._common_service_handler(service)
@serialized('backup_configuration')
@is_connected
def backup_configuration(self):
    """Persist the running configuration on every managed device."""
    for device in self.get_all_bigips():
        LOG.debug('_backup_configuration: saving device %s.'
                  % device.hostname)
        self.cluster_manager.save_config(device)
def _service_exists(self, service):
# Returns whether the bigip has a pool for the service
if not service['loadbalancer']:
return False
loadbalancer = service['loadbalancer']
bigip = self.get_bigip()
folder_name = self.service_adapter.get_folder_name(
loadbalancer['tenant_id']
)
# Does the tenant folder exist?
if not self.system_helper.folder_exists(bigip, folder_name):
return False
# Ensure that each virtual service exists.
# TODO(<NAME>): check the listener status instead, this can be
# used to detemine the health of the service.
for listener in service['listeners']:
svc = {'loadbalancer': loadbalancer,
'listener': listener}
if not self.lbaas_builder.listener_exists(svc, bigip):
return False
return True
def _common_service_handler(self, service, delete_partition=False):
    # Assure that the service is configured on bigip(s)
    start_time = time()
    if not service['loadbalancer']:
        LOG.error("_common_service_handler: Service loadbalancer is None")
        return
    try:
        # 1) Make sure the tenant folder/partition exists on the devices.
        self.tenant_manager.assure_tenant_created(service)
        LOG.debug(" _assure_tenant_created took %.5f secs" %
                  (time() - start_time))
        traffic_group = self.service_to_traffic_group(service)
        LOG.debug("XXXXXXXXXX: traffic group created ")
        # This loop will only run once. Using while as a control-flow
        # mechanism to flatten out the code by allowing breaks.
        while (self.network_builder):
            if not self.disconnected_service.is_service_connected(service):
                if self.disconnected_service_polling.enabled:
                    # Hierarchical port-binding mode:
                    # Skip network setup if the service is not connected.
                    break
                else:
                    LOG.error("Misconfiguration: Segmentation ID is "
                              "missing from the service definition. "
                              "Please check the setting for "
                              "f5_network_segment_physical_network in "
                              "f5-openstack-agent.ini in case neutron "
                              "is operating in Hierarhical Port Binding "
                              "mode.")
                    service['loadbalancer']['provisioning_status'] = \
                        plugin_const.ERROR
                    raise f5ex.MissingNetwork("Missing segmentation id")
            # 2) Set up L2/L3 networking for the service.
            start_time = time()
            try:
                self.network_builder.prep_service_networking(
                    service, traffic_group)
            except Exception as exc:
                LOG.error("Exception: icontrol_driver: %s", exc.message)
                service['loadbalancer']['provisioning_status'] = \
                    plugin_const.ERROR
                raise
            if time() - start_time > .001:
                LOG.debug(" _prep_service_networking "
                          "took %.5f secs" % (time() - start_time))
            break
        all_subnet_hints = {}
        LOG.debug("XXXXXXXXXX: getting bigip configs")
        # 3) Configure LBaaS objects on each device, collecting per-device
        # hints about which subnets may need cleanup afterwards.
        for bigip in self.get_config_bigips():
            # check_for_delete_subnets:
            # keep track of which subnets we should check to delete
            # for a deleted vip or member
            # do_not_delete_subnets:
            # If we add an IP to a subnet we must not delete the subnet
            all_subnet_hints[bigip.device_name] = \
                {'check_for_delete_subnets': {},
                 'do_not_delete_subnets': []}
        LOG.debug("XXXXXXXXX: Pre assure service")
        self.lbaas_builder.assure_service(service,
                                          traffic_group,
                                          all_subnet_hints)
        LOG.debug("XXXXXXXXX: Post assure service")
        # 4) Tear down networking that is no longer referenced.
        if self.network_builder:
            start_time = time()
            self.network_builder.post_service_networking(
                service, all_subnet_hints)
            LOG.debug(" _post_service_networking took %.5f secs" %
                      (time() - start_time))
        # only delete partition if loadbalancer is being deleted
        if delete_partition:
            self.tenant_manager.assure_tenant_cleanup(service,
                                                      all_subnet_hints)
    except Exception as err:
        LOG.exception(err)
    finally:
        # 5) Always push the resulting object statuses back to Neutron.
        self._update_service_status(service)
def _update_service_status(self, service):
"""Update status of objects in OpenStack """
if not self.plugin_rpc:
LOG.error("Cannot update status in Neutron without "
"RPC handler.")
return
if 'members' in service:
# Call update_members_status
self._update_member_status(service['members'])
if 'healthmonitors' in service:
# Call update_monitor_status
self._update_health_monitor_status(
service['healthmonitors']
)
if 'pools' in service:
# Call update_pool_status
self._update_pool_status(
service['pools']
)
if 'listeners' in service:
# Call update_listener_status
self._update_listener_status(service)
self._update_loadbalancer_status(service)
def _update_member_status(self, members):
    """Update member status in OpenStack """
    for member in members:
        if 'provisioning_status' not in member:
            continue
        status = member['provisioning_status']
        if status in (plugin_const.PENDING_CREATE,
                      plugin_const.PENDING_UPDATE):
            # (Re)configuration succeeded: report the member live.
            self.plugin_rpc.update_member_status(
                member['id'], plugin_const.ACTIVE, lb_const.ONLINE)
        elif status == plugin_const.PENDING_DELETE:
            self.plugin_rpc.member_destroyed(member['id'])
        elif status == plugin_const.ERROR:
            self.plugin_rpc.update_member_status(member['id'])
def _update_health_monitor_status(self, health_monitors):
    """Update pool monitor status in OpenStack """
    for health_monitor in health_monitors:
        if 'provisioning_status' not in health_monitor:
            continue
        status = health_monitor['provisioning_status']
        if status in (plugin_const.PENDING_CREATE,
                      plugin_const.PENDING_UPDATE):
            # (Re)configuration succeeded: report the monitor live.
            self.plugin_rpc.update_health_monitor_status(
                health_monitor['id'], plugin_const.ACTIVE, lb_const.ONLINE)
        elif status == plugin_const.PENDING_DELETE:
            self.plugin_rpc.health_monitor_destroyed(health_monitor['id'])
        elif status == plugin_const.ERROR:
            self.plugin_rpc.update_health_monitor_status(
                health_monitor['id'])
@log_helpers.log_method_call
def _update_pool_status(self, pools):
    """Update pool status in OpenStack """
    for pool in pools:
        if 'provisioning_status' not in pool:
            continue
        status = pool['provisioning_status']
        if status in (plugin_const.PENDING_CREATE,
                      plugin_const.PENDING_UPDATE):
            # (Re)configuration succeeded: report the pool live.
            self.plugin_rpc.update_pool_status(
                pool['id'], plugin_const.ACTIVE, lb_const.ONLINE)
        elif status == plugin_const.PENDING_DELETE:
            self.plugin_rpc.pool_destroyed(pool['id'])
        elif status == plugin_const.ERROR:
            self.plugin_rpc.update_pool_status(pool['id'])
@log_helpers.log_method_call
def _update_listener_status(self, service):
    """Update listener status in OpenStack """
    for listener in service['listeners']:
        if 'provisioning_status' not in listener:
            continue
        status = listener['provisioning_status']
        if status in (plugin_const.PENDING_CREATE,
                      plugin_const.PENDING_UPDATE):
            # Keep whatever operating status the listener already has.
            self.plugin_rpc.update_listener_status(
                listener['id'], plugin_const.ACTIVE,
                listener['operating_status'])
        elif status == plugin_const.PENDING_DELETE:
            self.plugin_rpc.listener_destroyed(listener['id'])
        elif status == plugin_const.ERROR:
            self.plugin_rpc.update_listener_status(
                listener['id'], status, lb_const.OFFLINE)
@log_helpers.log_method_call
def _update_loadbalancer_status(self, service):
    """Update loadbalancer status in OpenStack """
    loadbalancer = service['loadbalancer']
    status = loadbalancer['provisioning_status']
    if status in (plugin_const.PENDING_CREATE, plugin_const.PENDING_UPDATE):
        operating_status = lb_const.ONLINE
        if (self.disconnected_service_polling.enabled and
                not self.disconnected_service.is_service_connected(service)):
            # The disconnected-service polling thread owns the operating
            # status in that mode; report OFFLINE until it flips it.
            operating_status = lb_const.OFFLINE
        self.plugin_rpc.update_loadbalancer_status(
            loadbalancer['id'], plugin_const.ACTIVE, operating_status)
    elif status == plugin_const.PENDING_DELETE:
        self.plugin_rpc.loadbalancer_destroyed(loadbalancer['id'])
    elif status == plugin_const.ERROR:
        self.plugin_rpc.update_loadbalancer_status(
            loadbalancer['id'], status, lb_const.OFFLINE)
    else:
        LOG.error('Loadbalancer provisioning status is invalid')
def service_to_traffic_group(self, service):
    """Map a service to a traffic group via its tenant id."""
    # The traffic-group choice is a pure function of the tenant.
    tenant_id = service['loadbalancer']['tenant_id']
    return self.tenant_to_traffic_group(tenant_id)
def tenant_to_traffic_group(self, tenant_id):
    # Hash tenant id to index of traffic group
    # NOTE(review): hashlib.md5() needs a byte string; this is fine on
    # the Python 2 runtime this module targets (it also uses urllib2),
    # but would need tenant_id.encode() under Python 3 — confirm.
    hexhash = hashlib.md5(tenant_id).hexdigest()
    # Deterministic mapping: a tenant always lands on the same group.
    tg_index = int(hexhash, 16) % len(self.__traffic_groups)
    return self.__traffic_groups[tg_index]
def get_bigip(self):
    # Get one consistent big-ip
    # As implemented I think this always returns the "first" bigip
    # without any HTTP traffic? CONFIRMED: __bigips are mgmt_rts
    hostnames = sorted(self.__bigips)
    for i in range(len(hostnames)):  # C-style make Pythonic.
        try:
            # NOTE(review): a plain dict lookup cannot raise URLError,
            # so this effectively returns the alphabetically-first
            # bigip; the except clause is dead code as written.
            bigip = self.__bigips[hostnames[i]]  # Calling devices?!
            return bigip
        except urllib2.URLError:
            pass
    # Only reachable when no bigips are registered at all.
    raise urllib2.URLError('cannot communicate to any bigips')
def get_bigip_hosts(self):
    # Get all big-ips hostnames under management
    # NOTE(review): this returns the underlying dict (hostname -> bigip),
    # not a list of hostnames; iterating it does yield hostnames, though.
    return self.__bigips
def get_all_bigips(self):
    # Get all big-ips under management
    # Returns the device objects themselves (dict values), not hostnames.
    return self.__bigips.values()
def get_config_bigips(self):
    # Return a list of big-ips that need to be configured.
    # Currently every managed device is configured identically.
    return self.get_all_bigips()
def get_inbound_throughput(self, bigip, global_statistics=None):
    """Delegate inbound-throughput collection to the stat helper."""
    throughput = self.stat_helper.get_inbound_throughput(
        bigip, global_stats=global_statistics)
    return throughput
def get_outbound_throughput(self, bigip, global_statistics=None):
    """Delegate outbound-throughput collection to the stat helper."""
    throughput = self.stat_helper.get_outbound_throughput(
        bigip, global_stats=global_statistics)
    return throughput
def get_throughput(self, bigip=None, global_statistics=None):
    """Delegate total-throughput collection to the stat helper."""
    throughput = self.stat_helper.get_throughput(
        bigip, global_stats=global_statistics)
    return throughput
def get_active_connections(self, bigip=None, global_statistics=None):
    """Delegate active-connection counting to the stat helper."""
    count = self.stat_helper.get_active_connection_count(
        bigip, global_stats=global_statistics)
    return count
def get_ssltps(self, bigip=None, global_statistics=None):
    """Delegate SSL transactions-per-second collection to the stat helper."""
    tps = self.stat_helper.get_active_SSL_TPS(
        bigip, global_stats=global_statistics)
    return tps
def get_node_count(self, bigip=None, global_statistics=None):
    """Return the number of LTM nodes configured on the device."""
    nodes = bigip.tm.ltm.nodes.get_collection()
    return len(nodes)
def get_clientssl_profile_count(self, | |
<reponame>mkhalil8/hnn-core
"""Network class."""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
import itertools as it
from copy import deepcopy
import numpy as np
from .drives import _drive_cell_event_times
from .drives import _get_target_properties, _add_drives_from_params
from .drives import _check_drive_parameter_values, _check_poisson_rates
from .cells_default import pyramidal, basket
from .params import _long_name, _short_name
from .viz import plot_cells
from .externals.mne import _validate_type, _check_option
from .extracellular import ExtracellularArray
from .check import _check_gids, _gid_to_type, _string_input_to_list
def _create_cell_coords(n_pyr_x, n_pyr_y, zdiff, inplane_distance):
"""Creates coordinate grid and place cells in it.
Parameters
----------
n_pyr_x : int
The number of Pyramidal cells in x direction.
n_pyr_y : int
The number of Pyramidal cells in y direction.
zdiff : float
Expressed as a positive DEPTH of L2 relative to L5 pyramidal cell
somas, where L5 is defined to lie at z==0. Interlaminar weight/delay
calculations (lamtha) are not affected. The basket cells are
arbitrarily placed slightly above (L5) and slightly below (L2) their
respective pyramidal cell layers.
inplane_distance : float
The grid spacing of pyramidal cells (in um). Note that basket cells are
placed in an uneven formation. Each one of them lies on a grid point
together with a pyramidal cell, though (overlapping).
Returns
-------
pos_dict : dict of list of tuple (x, y, z)
Dictionary containing coordinate positions.
Keys are 'L2_pyramidal', 'L5_pyramidal', 'L2_basket', 'L5_basket',
'common', or any of the elements of the list p_unique_keys
Notes
-----
Common positions are all located at origin.
Sort of a hack bc of redundancy
"""
pos_dict = dict()
# PYRAMIDAL CELLS
xxrange = np.arange(n_pyr_x) * inplane_distance
yyrange = np.arange(n_pyr_y) * inplane_distance
pos_dict['L5_pyramidal'] = [
pos for pos in it.product(xxrange, yyrange, [0])]
pos_dict['L2_pyramidal'] = [
pos for pos in it.product(xxrange, yyrange, [zdiff])]
# BASKET CELLS
xzero = np.arange(0, n_pyr_x, 3) * inplane_distance
xone = np.arange(1, n_pyr_x, 3) * inplane_distance
# split even and odd y vals
yeven = np.arange(0, n_pyr_y, 2) * inplane_distance
yodd = np.arange(1, n_pyr_y, 2) * inplane_distance
# create general list of x,y coords and sort it
coords = [pos for pos in it.product(
xzero, yeven)] + [pos for pos in it.product(xone, yodd)]
coords_sorted = sorted(coords, key=lambda pos: pos[1])
# append the z value for position for L2 and L5
# print(len(coords_sorted))
pos_dict['L5_basket'] = [(pos_xy[0], pos_xy[1], 0.2 * zdiff) for
pos_xy in coords_sorted]
pos_dict['L2_basket'] = [(pos_xy[0], pos_xy[1], 0.8 * zdiff) for
pos_xy in coords_sorted]
# ORIGIN
# origin's z component isn't really used in
# calculating distance functions from origin
# these will be forced as ints!
origin_x = xxrange[int((len(xxrange) - 1) // 2)]
origin_y = yyrange[int((len(yyrange) - 1) // 2)]
origin_z = np.floor(zdiff / 2)
origin = (origin_x, origin_y, origin_z)
# save the origin for adding external drives later
pos_dict['origin'] = origin
return pos_dict
def _connection_probability(conn, probability, conn_seed=3):
    """Randomly prune a connection down to a subset of its pairs.

    Parameters
    ----------
    conn : Instance of _Connectivity object
        Connection whose src-target pairs are pruned; modified in place.
    probability : float
        Probability that any given src-target pair is kept.
    conn_seed : int
        Seed for the random number generator (default: 3).

    Notes
    -----
    ``num_srcs`` and ``num_targets`` are not updated after pruning; they
    describe the original set of connections. The stored probability
    reflects only the most recent call, so after successive calls it no
    longer describes the surviving fraction of the original set.
    """
    # Reproducible generator for selecting which pairs survive.
    rng = np.random.default_rng(conn_seed)
    _validate_type(probability, float, 'probability')
    if probability <= 0.0 or probability >= 1.0:
        raise ValueError('probability must be in the range (0,1)')
    # Flatten all target lists so pairs can be addressed by a global index.
    all_connections = np.concatenate(list(conn['gid_pairs'].values()))
    n_keep = np.round(len(all_connections) * probability).astype(int)
    # Global indices of the surviving pairs (set for O(1) membership).
    kept_indices = set(
        rng.choice(range(len(all_connections)), n_keep, replace=False))
    pair_idx = 0
    exhausted_srcs = []
    for src_gid, targets in conn['gid_pairs'].items():
        surviving = []
        for target_gid in targets:
            if pair_idx in kept_indices:
                surviving.append(target_gid)
            pair_idx += 1
        if surviving:
            conn['gid_pairs'][src_gid] = surviving
        else:
            exhausted_srcs.append(src_gid)
    # Drop sources that lost every one of their targets.
    for src_gid in exhausted_srcs:
        conn['gid_pairs'].pop(src_gid)
def pick_connection(net, src_gids=None, target_gids=None,
                    loc=None, receptor=None):
    """Return indices of connections that match the search parameters.

    Parameters
    ----------
    net : Instance of Network object
        The Network object.
    src_gids : str | int | range | list of int | None
        Identifier for source cells; a cell-type string ('L2_pyramidal',
        'L2_basket', 'L5_pyramidal', 'L5_basket') is equivalent to the
        list of gids of that type.
    target_gids : str | int | range | list of int | None
        Identifier for target cells, with the same conventions as
        ``src_gids``.
    loc : str | list of str | None
        Synapse location on the target cell: 'proximal', 'distal', or
        'soma'.
    receptor : str | list of str | None
        Synaptic receptor: 'ampa', 'nmda', 'gabaa', or 'gabab'.

    Returns
    -------
    conn_indices : list of int
        Sorted indices into ``net.connectivity`` matching the search.

    Notes
    -----
    A list of values for one parameter acts as a logical OR across that
    parameter; supplying several parameters acts as a logical AND across
    them.
    """
    # Normalise the gid arguments to explicit lists of gids.
    valid_srcs = list(net.gid_ranges.keys())  # includes drives as srcs
    valid_targets = list(net.cell_types.keys())
    src_gids = _check_gids(src_gids, net.gid_ranges,
                           valid_srcs, 'src_gids')
    target_gids = _check_gids(target_gids, net.gid_ranges,
                              valid_targets, 'target_gids')
    _validate_type(loc, (str, list, None), 'loc', 'str, list, or None')
    _validate_type(receptor, (str, list, None), 'receptor',
                   'str, list, or None')
    valid_loc = ['proximal', 'distal', 'soma']
    valid_receptor = ['ampa', 'nmda', 'gabaa', 'gabab']
    # Normalise loc and receptor to lists.
    loc = _string_input_to_list(loc, valid_loc, 'loc')
    receptor = _string_input_to_list(receptor, valid_receptor, 'receptor')
    # Build reverse lookups: parameter value -> list of connection indices.
    src_dict, target_dict = dict(), dict()
    loc_dict, receptor_dict = dict(), dict()
    for conn_idx, conn in enumerate(net.connectivity):
        for gid in conn['src_gids']:
            src_dict.setdefault(gid, []).append(conn_idx)
        for gid in conn['target_gids']:
            target_dict.setdefault(gid, []).append(conn_idx)
        loc_dict.setdefault(conn['loc'], []).append(conn_idx)
        receptor_dict.setdefault(conn['receptor'], []).append(conn_idx)
    # Union of matches within a parameter, intersection across parameters.
    conn_set = set()
    for terms, lookup in ((src_gids, src_dict), (target_gids, target_dict),
                          (loc, loc_dict), (receptor, receptor_dict)):
        matches = set()
        for term in terms:
            matches.update(lookup.get(term, list()))
        if conn_set and matches:
            conn_set = conn_set.intersection(matches)
        else:
            conn_set = conn_set.union(matches)
    return sorted(conn_set)
class Network(object):
"""The Network class.
Parameters
----------
params : dict
The parameters to use for constructing the network.
add_drives_from_params : bool
If True, add drives as defined in the params-dict. NB this is mainly
for backward-compatibility with HNN GUI, and will be deprecated in a
future release. Default: False
legacy_mode : bool
Set to True by default to enable matching HNN GUI output when drives
are added suitably. Will be deprecated in a future release.
Attributes
----------
cell_types : dict
Dictionary containing names of real cell types in the network
(e.g. 'L2_basket') as keys | |
"""Check that `query`ing an `NEODatabase` accurately produces close approaches.
There are a plethora of ways to combine the arguments to `create_filters`, which
correspond to different command-line options. This module tests the options in
isolation, in pairs, and in more complicated combinations. Although the tests
are not entirely exhaustive, any implementation that passes all of these tests
is most likely up to snuff.
To run these tests from the project root, run::
$ python3 -m unittest --verbose tests.test_query
These tests should pass when Tasks 3a and 3b are complete.
"""
import datetime
import pathlib
import unittest
from database import NEODatabase
from extract import load_neos, load_approaches
from filters import create_filters
TESTS_ROOT = (pathlib.Path(__file__).parent).resolve()
TEST_NEO_FILE = TESTS_ROOT / 'test-neos-2020.csv'
TEST_CAD_FILE = TESTS_ROOT / 'test-cad-2020.json'
class TestQuery(unittest.TestCase):
# Set longMessage to True to enable lengthy diffs between set comparisons.
longMessage = False
@classmethod
def setUpClass(cls):
cls.neos = load_neos(TEST_NEO_FILE)
cls.approaches = load_approaches(TEST_CAD_FILE)
cls.db = NEODatabase(cls.neos, cls.approaches)
def test_query_all(self):
expected = set(self.approaches)
self.assertGreater(len(expected), 0)
filters = create_filters()
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
###############################################
# Single filters and pairs of related filters #
###############################################
def test_query_approaches_on_march_2(self):
date = datetime.date(2020, 3, 2)
expected = set(
approach for approach in self.approaches
if approach.time.date() == date
)
self.assertGreater(len(expected), 0)
filters = create_filters(date=date)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_approaches_after_april(self):
start_date = datetime.date(2020, 4, 1)
expected = set(
approach for approach in self.approaches
if start_date <= approach.time.date()
)
self.assertGreater(len(expected), 0)
filters = create_filters(start_date=start_date)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_approaches_before_july(self):
end_date = datetime.date(2020, 6, 30)
expected = set(
approach for approach in self.approaches
if approach.time.date() <= end_date
)
self.assertGreater(len(expected), 0)
filters = create_filters(end_date=end_date)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_approaches_in_march(self):
start_date = datetime.date(2020, 3, 1)
end_date = datetime.date(2020, 3, 31)
expected = set(
approach for approach in self.approaches
if start_date <= approach.time.date() <= end_date
)
self.assertGreater(len(expected), 0)
filters = create_filters(start_date=start_date, end_date=end_date)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_with_conflicting_date_bounds(self):
start_date = datetime.date(2020, 10, 1)
end_date = datetime.date(2020, 4, 1)
expected = set()
filters = create_filters(start_date=start_date, end_date=end_date)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_with_bounds_and_a_specific_date(self):
start_date = datetime.date(2020, 2, 1)
date = datetime.date(2020, 3, 2)
end_date = datetime.date(2020, 4, 1)
expected = set(
approach for approach in self.approaches
if approach.time.date() == date
)
self.assertGreater(len(expected), 0)
filters = create_filters(date=date, start_date=start_date, end_date=end_date)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_with_max_distance(self):
distance_max = 0.4
expected = set(
approach for approach in self.approaches
if approach.distance <= distance_max
)
self.assertGreater(len(expected), 0)
filters = create_filters(distance_max=distance_max)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_with_min_distance(self):
distance_min = 0.1
expected = set(
approach for approach in self.approaches
if distance_min <= approach.distance
)
self.assertGreater(len(expected), 0)
filters = create_filters(distance_min=distance_min)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_with_max_distance_and_min_distance(self):
distance_max = 0.4
distance_min = 0.1
expected = set(
approach for approach in self.approaches
if distance_min <= approach.distance <= distance_max
)
self.assertGreater(len(expected), 0)
filters = create_filters(distance_min=distance_min, distance_max=distance_max)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_with_max_distance_and_min_distance_conflicting(self):
distance_max = 0.1
distance_min = 0.4
expected = set()
filters = create_filters(distance_min=distance_min, distance_max=distance_max)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_with_max_velocity(self):
velocity_max = 20
expected = set(
approach for approach in self.approaches
if approach.velocity <= velocity_max
)
self.assertGreater(len(expected), 0)
filters = create_filters(velocity_max=velocity_max)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_with_min_velocity(self):
velocity_min = 10
expected = set(
approach for approach in self.approaches
if velocity_min <= approach.velocity
)
self.assertGreater(len(expected), 0)
filters = create_filters(velocity_min=velocity_min)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_with_max_velocity_and_min_velocity(self):
velocity_max = 20
velocity_min = 10
expected = set(
approach for approach in self.approaches
if velocity_min <= approach.velocity <= velocity_max
)
self.assertGreater(len(expected), 0)
filters = create_filters(velocity_min=velocity_min, velocity_max=velocity_max)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_with_max_velocity_and_min_velocity_conflicting(self):
velocity_max = 10
velocity_min = 20
expected = set()
filters = create_filters(velocity_min=velocity_min, velocity_max=velocity_max)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_with_max_diameter(self):
diameter_max = 1.5
expected = set(
approach for approach in self.approaches
if approach.neo.diameter <= diameter_max
)
self.assertGreater(len(expected), 0)
filters = create_filters(diameter_max=diameter_max)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_with_min_diameter(self):
diameter_min = 0.5
expected = set(
approach for approach in self.approaches
if diameter_min <= approach.neo.diameter
)
self.assertGreater(len(expected), 0)
filters = create_filters(diameter_min=diameter_min)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_with_max_diameter_and_min_diameter(self):
diameter_max = 1.5
diameter_min = 0.5
expected = set(
approach for approach in self.approaches
if diameter_min <= approach.neo.diameter <= diameter_max
)
self.assertGreater(len(expected), 0)
filters = create_filters(diameter_min=diameter_min, diameter_max=diameter_max)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_with_max_diameter_and_min_diameter_conflicting(self):
diameter_max = 0.5
diameter_min = 1.5
expected = set()
filters = create_filters(diameter_min=diameter_min, diameter_max=diameter_max)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_with_hazardous(self):
expected = set(
approach for approach in self.approaches
if approach.neo.hazardous
)
self.assertGreater(len(expected), 0)
filters = create_filters(hazardous=True)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_with_not_hazardous(self):
expected = set(
approach for approach in self.approaches
if not approach.neo.hazardous
)
self.assertGreater(len(expected), 0)
filters = create_filters(hazardous=False)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
###########################
# Combinations of filters #
###########################
def test_query_approaches_on_march_2_with_max_distance(self):
date = datetime.date(2020, 3, 2)
distance_max = 0.4
expected = set(
approach for approach in self.approaches
if approach.time.date() == date
and approach.distance <= distance_max
)
self.assertGreater(len(expected), 0)
filters = create_filters(date=date, distance_max=distance_max)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_approaches_on_march_2_with_min_distance(self):
date = datetime.date(2020, 3, 2)
distance_min = 0.1
expected = set(
approach for approach in self.approaches
if approach.time.date() == date
and distance_min <= approach.distance
)
self.assertGreater(len(expected), 0)
filters = create_filters(date=date, distance_min=distance_min)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_approaches_in_march_with_min_distance_and_max_distance(self):
start_date = datetime.date(2020, 3, 1)
end_date = datetime.date(2020, 3, 31)
distance_max = 0.4
distance_min = 0.1
expected = set(
approach for approach in self.approaches
if start_date <= approach.time.date() <= end_date
and distance_min <= approach.distance <= distance_max
)
self.assertGreater(len(expected), 0)
filters = create_filters(
start_date=start_date, end_date=end_date,
distance_min=distance_min, distance_max=distance_max,
)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_approaches_in_march_with_distance_bounds_and_max_velocity(self):
start_date = datetime.date(2020, 3, 1)
end_date = datetime.date(2020, 3, 31)
distance_max = 0.4
distance_min = 0.1
velocity_max = 20
expected = set(
approach for approach in self.approaches
if start_date <= approach.time.date() <= end_date
and distance_min <= approach.distance <= distance_max
and approach.velocity <= velocity_max
)
self.assertGreater(len(expected), 0)
filters = create_filters(
start_date=start_date, end_date=end_date,
distance_min=distance_min, distance_max=distance_max,
velocity_max=velocity_max
)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_approaches_in_march_with_distance_and_velocity_bounds(self):
start_date = datetime.date(2020, 3, 1)
end_date = datetime.date(2020, 3, 31)
distance_max = 0.4
distance_min = 0.1
velocity_max = 20
velocity_min = 10
expected = set(
approach for approach in self.approaches
if start_date <= approach.time.date() <= end_date
and distance_min <= approach.distance <= distance_max
and velocity_min <= approach.velocity <= velocity_max
)
self.assertGreater(len(expected), 0)
filters = create_filters(
start_date=start_date, end_date=end_date,
distance_min=distance_min, distance_max=distance_max,
velocity_min=velocity_min, velocity_max=velocity_max
)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_approaches_in_spring_with_distance_and_velocity_bounds_and_max_diameter(self):
start_date = datetime.date(2020, 3, 1)
end_date = datetime.date(2020, 5, 31)
distance_max = 0.5
distance_min = 0.05
velocity_max = 25
velocity_min = 5
diameter_max = 1.5
expected = set(
approach for approach in self.approaches
if start_date <= approach.time.date() <= end_date
and distance_min <= approach.distance <= distance_max
and velocity_min <= approach.velocity <= velocity_max
and approach.neo.diameter <= diameter_max
)
self.assertGreater(len(expected), 0)
filters = create_filters(
start_date=start_date, end_date=end_date,
distance_min=distance_min, distance_max=distance_max,
velocity_min=velocity_min, velocity_max=velocity_max,
diameter_max=diameter_max
)
received = set(self.db.query(filters))
self.assertEqual(expected, received, msg="Computed results do not match expected results.")
def test_query_approaches_in_spring_with_distance_velocity_and_diameter_bounds(self):
start_date = datetime.date(2020, 3, 1)
end_date = datetime.date(2020, 5, 31)
distance_max = 0.5
distance_min = 0.05
| |
command in. Max 5 channels per server. Overrides previous settings.
[Options]
Info (-info): check the options of the mapfeed on your server.
Map Status (-st): 1 = Ranked, 2 = Approved, 3 = Qualified, 4 = Loved. Exclusively these map statuses.
Modes (-m #): 0 = std, 1 = taiko, 2 = ctb, 3 = mania. Exclusively these modes.
Min Stars (-ls): greater than this value. (float)
Max Stars (-hs): less than this value. (float)
Min Length (-ll): maps longer than this value. (int, in seconds)
Max Length (-hl): maps shorter than this value. (int, in seconds)
Mapper (-mpr): Name or ID of mappers, ID preferred. Delimiter (,). (str)
Excluded Mapper (-xmpr): Name or ID of mappers, ID preferred. Delimiter (,). (str)
Remove (-rm): remove the channel from map feed.
[Example]
+<COMMAND> -m 03 -st 14 -xmpr "Sotarks,Monstrata,Stevy" -ls 5.2
"""
server = ctx.message.guild
channel = ctx.message.channel
user = ctx.message.author
map_track_limit = 5
new_channel_entry = {
"channel_id": str(channel.id), # just in case?
"status": [],
"modes": [],
"min_stars": None,
"max_stars": None,
"mappers": [],
"xmappers": [],
"max_length": None,
"min_length": None,
}
option_parser = OptionParser()
option_parser.add_option('st', 'status', opt_type='str', default=None)
option_parser.add_option('m', 'modes', opt_type='str', default=None)
option_parser.add_option('ls', 'min_stars', opt_type='float', default=None)
option_parser.add_option('hs', 'max_stars', opt_type='float', default=None)
option_parser.add_option('ll', 'min_length', opt_type='int', default=None)
option_parser.add_option('hl', 'max_length', opt_type='int', default=None)
option_parser.add_option('rm', 'remove', opt_type=None, default=False)
option_parser.add_option('o', 'overwrite', opt_type=None, default=False)
option_parser.add_option('info', 'info', opt_type=None, default=False)
option_parser.add_option('mpr', 'mappers', opt_type='str', default=None)
option_parser.add_option('xmpr', 'xmappers', opt_type='str', default=None)
_, options = option_parser.parse(options)
# get server info
server_mf_info = await self.get_server_mapfeed(server)
channel_mf_info = None
new_entry = False
if str(channel.id) in server_mf_info["channels"].keys():
channel_mf_info = server_mf_info["channels"][str(channel.id)]
else:
channel_mf_info = new_channel_entry
new_entry = True
if options["info"]:
# give info
embeds = []
if not server_mf_info['channels']:
return await ctx.send(
"**No maps are being tracked in this server.**")
for channel_id in server_mf_info["channels"]:
em = discord.Embed(color=user.colour)
em.set_author(name = f"{server.name}'s Map Feeds", icon_url = server.icon_url)
msg = ""
# try:
channel = self.bot.get_channel(int(channel_id))
for attr in server_mf_info["channels"][channel_id]:
value = server_mf_info["channels"][channel_id][attr]
if isinstance(value, list) and value != []:
value = ", ".join([str(i) for i in value]) # ensure string
elif value is None or value == []:
value = "N/A"
if attr == 'xmappers': # just too look better
attr = 'Excluded Mappers'
if "channel_id" not in attr:
msg += "**{}**: `{}`\n".format(attr.replace("_", " ").title(),
value)
em.add_field(name=f"#{channel.name}", value = msg)
embeds.append(em)
if embeds:
return await self.bot.menu(ctx, embeds)
else:
return ctx.send(":white_circle: **The server currently does not have any map feed channels!**")
if options["remove"]:
if channel_mf_info:
del server_mf_info["channels"][str(channel.id)]
await self.map_track.update_one({"server_id":str(server.id)},
{"$set": {"channels": server_mf_info["channels"]}})
return await ctx.send(f":white_check_mark: **`#{channel.name}` removed from map feed list.**")
else:
return await ctx.send(f":red_circle: **`#{channel.name}` is not on the map feed list.**")
if options["overwrite"]:
server_mf_info["channels"][str(channel.id)] = new_channel_entry
if new_entry and len(server_mf_info["channels"].keys()) >= map_track_limit and \
not options["overwrite"]:
return await ctx.send(f":red_circle: **You can only track in {map_track_limit} channels per server!**")
# ------ filtering options-------
valid_status = ["1", "2", "3", "4"]
if options["status"]:
status_list = list(options["status"])
user_status = []
for s in valid_status:
if s in status_list:
user_status.append(s)
channel_mf_info["status"] = user_status
else: # default to tracking all of them
channel_mf_info["status"] = valid_status
valid_modes = [0,1,2,3]
if options["modes"]:
user_modes = []
for m in valid_modes:
if str(m) in str(options["modes"]):
user_modes.append(m)
channel_mf_info["modes"] = user_modes
else:
channel_mf_info["modes"] = valid_modes
# -------- check stars-----------
if options["min_stars"]:
try:
min_stars = float(options["min_stars"])
channel_mf_info["min_stars"] = min_stars
except:
return await ctx.send(":red_circle: **Please check your minimum stars.**")
if min_stars > 6:
return await ctx.send(":red_circle: **Your minimum stars can't be greater than 6!**")
else:
channel_mf_info["min_stars"] = 0
if options["max_stars"]:
try:
max_stars = float(options["max_stars"])
channel_mf_info["max_stars"] = max_stars
except:
return await ctx.send(":red_circle: **Please check your maximum stars.**")
if (options["min_stars"] and min_stars) and max_stars < min_stars: # kinda riskyyy lol
return await ctx.send(":red_circle: **Min stars can't be greater than max stars...**")
else:
channel_mf_info["max_stars"] = 100
# -------- check length-----------
if options["min_length"]:
try:
min_length = int(options["min_length"])
channel_mf_info["min_length"] = min_length
except:
return await ctx.send(":red_circle: **Please check your minimum length (in seconds).**")
else:
channel_mf_info["min_length"] = 0
if options["max_length"]:
try:
max_length = int(options["max_length"])
channel_mf_info["max_length"] = max_length
except:
return await ctx.send(":red_circle: **Please check your maximum length (in seconds).**")
if (options["min_length"] and min_length) and max_length < min_length: # kinda riskyyy lol
return await ctx.send(":red_circle: **Min length can't be greater than max length...**")
else:
channel_mf_info["max_length"] = 10000 # god knows if we'll ever have a map this long
if options["mappers"]:
if str(options['mappers']) == None:
channel_mf_info['mappers'] = []
else:
user_mappers = []
mappers = options["mappers"].split(",")
for mapper in mappers:
mapper = mapper.strip()
if mapper:
user_mappers.append(mapper)
channel_mf_info["mappers"].extend(user_mappers)
if options["xmappers"]:
if str(options['xmappers']) == None:
channel_mf_info['xmappers'] = []
else:
user_mappers = []
mappers = options["xmappers"].split(",")
for mapper in mappers:
mapper = mapper.strip()
if mapper:
user_mappers.append(mapper)
channel_mf_info["xmappers"].extend(user_mappers)
server_mf_info["channels"][str(channel.id)] = channel_mf_info
# print(server_mf_info) # **
await self.map_track.update_one({"server_id":str(server.id)},
{"$set": {"channels": server_mf_info["channels"]}})
if options["overwrite"]:
return await ctx.send(f":white_check_mark: **Successfully overwrote map feed options in `#{channel.name}`.**")
elif new_entry:
return await ctx.send(f":white_check_mark: **Successfully activated map feed in `#{channel.name}`.**")
else:
return await ctx.send(f":white_check_mark: **Edited map feed for `#{channel.name}`.**")
#await ctx.send(server_mf_info["channels"][str(channel.id)])
async def get_server_mapfeed(self, server):
server_mf_info = await self.map_track.find_one({"server_id": str(server.id)})
if not server_mf_info:
new_entry = {
"server_id": str(server.id),
"channels": {}
}
await self.map_track.insert_one(new_entry)
server_mf_info = await self.map_track.find_one({"server_id": str(server.id)})
return server_mf_info
async def map_feed(self):
MAP_FEED_INTERVAL = 60 # seconds
print("RUNNING Map Feed")
# use a json file instead of database
filepath = os.path.join(os.getcwd(), "cogs/osu/temp/map_feed.json")
if not os.path.exists(filepath):
map_feed_last = datetime.datetime.utcnow()
sql_date = datetime.datetime.strftime(map_feed_last, '%Y-%m-%d %H:%M:%S')
map_json = {"last_check": sql_date}
fileIO(filepath, "save", data=map_json)
else:
map_feed_last_str = fileIO(filepath, "load")['last_check']
map_feed_last = datetime.datetime.strptime(map_feed_last_str, '%Y-%m-%d %H:%M:%S')
while self == self.bot.get_cog('Osu'):
# get new beatmaps
sql_date = datetime.datetime.strftime(map_feed_last, '%Y-%m-%d %H:%M:%S')
try:
beatmaps = await self.owoAPI.get_beatmap(None, since=sql_date, use_cache=False)
except:
await self.map_feed()
# print(beatmaps)
print('QUERY TIME', sql_date)
# save and update
map_feed_last = datetime.datetime.utcnow()
sql_date_new = datetime.datetime.strftime(map_feed_last, '%Y-%m-%d %H:%M:%S')
map_json = {"last_check": sql_date_new}
fileIO(filepath, "save", data=map_json)
print('UPDATED TIME', sql_date_new)
# print('Time elapsed', sql_date-map_feed_last)
# display beatmaps
new_beatmapsets = self._group_beatmaps(beatmaps)
for beatmapset_id in new_beatmapsets:
# new_beatmapset = new_beatmapsets[beatmapset_id]
new_beatmapset = await self.owoAPI.get_beatmapset(beatmapset_id)
# filter out the converts
new_filtered = []
for new_bmp in new_beatmapset:
if new_bmp['convert']:
continue
new_filtered.append(new_bmp)
new_bmpset_embed = await self._create_new_bmp_embed(new_filtered)
bmpset_summary = self._get_bmpset_summary(new_filtered)
# send to appropriate channels
async for server_options in self.map_track.find({}, no_cursor_timeout=True):
guild_id = int(server_options["server_id"])
for channel_id in server_options['channels']:
channel_options = server_options['channels'][channel_id]
channel = self.bot.get_channel(int(channel_id))
# if pass the filters
guest_mapper = False # only for the mappers, not xmappers
for option_mapper in channel_options['mappers']:
for diff_name in bmpset_summary['diff_names']:
if option_mapper.lower() in diff_name.lower():
guest_mapper = True
if 'mappers' in channel_options and channel_options['mappers'] != [] and \
bmpset_summary['creator'] not in channel_options['mappers'] and \
bmpset_summary['creator_id'] not in channel_options['mappers'] and \
not guest_mapper:
continue
if 'xmappers' in channel_options and channel_options['xmappers'] != [] and \
(bmpset_summary['creator'] in channel_options['xmappers'] or \
bmpset_summary['creator_id'] in channel_options['xmappers']):
continue
if channel_options['min_length'] is not None and \
float(channel_options['min_length']) > float(bmpset_summary['total_length']):
continue
if channel_options['max_length'] is not None and \
float(channel_options['max_length']) < float(bmpset_summary['total_length']):
continue
if channel_options['min_stars'] is not None and \
all([float(channel_options['min_stars']) > float(bmp_diff) for bmp_diff in bmpset_summary['stars']]):
continue
if channel_options['max_stars'] is not None and \
all([float(channel_options['max_stars']) < float(bmp_diff) for bmp_diff in bmpset_summary['stars']]):
continue
if channel_options['max_stars'] is not None and \
all([float(channel_options['max_stars']) < float(bmp_diff) for bmp_diff in bmpset_summary['stars']]):
continue
if not set(channel_options['modes']).intersection(set(bmpset_summary['modes'])):
continue
if str(bmpset_summary['status']) not in channel_options['status']:
continue
if channel:
await channel.send(embed=new_bmpset_embed)
await asyncio.sleep(1)
await asyncio.sleep(MAP_FEED_INTERVAL)
async def _create_new_bmp_embed(self, beatmaps):
# create embed
status = beatmaps[0]['status']
em = discord.Embed()
# determine color of embed based on status
colour, colour_text = self._determine_status_color(status) # not the case anymore!
m0, s0 = divmod(int(beatmaps[0]['total_length']), 60)
desc = '**Length:** {}:{} **BPM:** {}\n'.format(m0,
str(s0).zfill(2), beatmaps[0]['bpm'])
# download links
dl_links = self._get_dl_links(beatmaps[0])
dl_text_links = []
for dl_name, dl_link in dl_links:
dl_text_links.append("[{}]({})".format(dl_name, dl_link))
desc += '**Download:** {}\n'.format(" | ".join(dl_text_links))
# symbols/diffs
desc += self._get_beatmap_diff_icons(beatmaps) + "\n"
beatmap_url = "https://osu.ppy.sh/beatmapsets/{}/".format(beatmaps[0]["beatmapset_id"])
# create return em
em.colour = colour
em.description = desc
profile_url = await self.owoAPI.get_user_avatar(beatmaps[0]["user_id"], 'bancho')
em.set_author(name="{} – {} by {}".format(
beatmaps[0]['artist'], beatmaps[0]['title'], beatmaps[0]['creator']),
url=beatmap_url, icon_url = profile_url)
try:
map_cover_url = beatmaps[0]["covers"]["card@2x"]
except: # old api
map_cover_url = 'https://assets.ppy.sh/beatmaps/{}/covers/cover.jpg'.format(beatmaps[0]["beatmapset_id"])
em.set_image(url=map_cover_url)
rel_time = datetime.datetime.strptime(beatmaps[0]['ranked_date'], '%Y-%m-%d %H:%M:%S').strftime('%B %d %Y at %H:%M:%S')
fav_count = "{} ❤︎ | ".format(beatmaps[0]["favourite_count"])
em.set_footer(text = 'Newly {} | {}{} on | |
<filename>build/lib/smileml/ml/random_layer.py
# -*- coding: utf8
# Author: <NAME> [dcl -at- panix -dot- com]
# Copyright(c) 2013
# License: Simple BSD
"""The :mod:`random_layer` module
implements Random Layer transformers.
Random layers are arrays of hidden unit activations that are
random functions of input activation values (dot products for simple
activation functions, distances from prototypes for radial basis
functions).
They are used in the implementation of Extreme Learning Machines (ELMs),
but can be used as a general input mapping.
"""
from abc import ABCMeta, abstractmethod
from math import sqrt
import numpy as np
import scipy.sparse as sp
from scipy.spatial.distance import cdist, pdist, squareform
from sklearn.metrics import pairwise_distances
from sklearn.utils import check_random_state, check_array
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.base import BaseEstimator, TransformerMixin
__all__ = [
'RandomLayer',
'MLPRandomLayer',
'RBFRandomLayer',
'GRBFRandomLayer',
]
class BaseRandomLayer(BaseEstimator, TransformerMixin, metaclass=ABCMeta):
    """Abstract Base Class for random layers.

    Concrete subclasses must implement `_generate_components` and
    `_compute_input_activations`; `fit` and `transform` are provided here.

    NOTE: the original code used the Python 2 ``__metaclass__ = ABCMeta``
    attribute, which is silently ignored under Python 3, so the abstract
    methods were not actually enforced; ``metaclass=ABCMeta`` restores that.
    """

    # registry of named activation functions, populated by subclasses
    _internal_activation_funcs = dict()

    @classmethod
    def activation_func_names(cls):
        """Get list of internal activation function names"""
        # wrap in list() so Python 3 callers get a real list, not a dict view
        return list(cls._internal_activation_funcs.keys())

    # take n_hidden and random_state, init components_ and
    # input_activations_
    def __init__(self, n_hidden=20, random_state=0, activation_func=None,
                 activation_args=None):
        self.n_hidden = n_hidden                # number of hidden units
        self.random_state = random_state        # seed or RandomState
        self.activation_func = activation_func  # name (str) or callable
        self.activation_args = activation_args  # kwargs for a callable func
        self.components_ = dict()
        self.input_activations_ = None
        # keyword args for internally defined funcs
        self._extra_args = dict()

    @abstractmethod
    def _generate_components(self, X):
        """Generate components of hidden layer given X"""

    @abstractmethod
    def _compute_input_activations(self, X):
        """Compute input activations given X"""

    # compute input activations and pass them
    # through the hidden layer transfer functions
    # to compute the transform
    def _compute_hidden_activations(self, X):
        """Compute hidden activations given X"""
        self._compute_input_activations(X)
        acts = self.input_activations_
        if callable(self.activation_func):
            # user-supplied callable: forward any user-supplied kwargs
            args_dict = self.activation_args if self.activation_args else {}
            X_new = self.activation_func(acts, **args_dict)
        else:
            # otherwise look up one of the named internal functions
            func_name = self.activation_func
            func = self._internal_activation_funcs[func_name]
            X_new = func(acts, **self._extra_args)
        return X_new

    # perform fit by generating random components based
    # on the input array
    def fit(self, X, y=None):
        """Generate a random hidden layer.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]
            Training set: only the shape is used to generate random component
            values for hidden units
        y : is not used: placeholder to allow for usage in a Pipeline.
        Returns
        -------
        self
        """
        X = check_array(X)
        self._generate_components(X)
        return self

    # perform transformation by calling compute_hidden_activations
    # (which will normally call compute_input_activations first)
    def transform(self, X, y=None):
        """Generate the random hidden layer's activations given X as input.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            Data to transform
        y : is not used: placeholder to allow for usage in a Pipeline.
        Returns
        -------
        X_new : numpy array of shape [n_samples, n_components]
        Raises
        ------
        ValueError
            If the layer has not been fitted yet.
        """
        X = check_array(X)
        # components_ starts as an empty dict in __init__, so the original
        # `is None` guard could never fire; test for emptiness instead
        if not self.components_:
            raise ValueError('No components initialized')
        return self._compute_hidden_activations(X)
class RandomLayer(BaseRandomLayer):
    """RandomLayer is a transformer that creates a feature mapping of the
    inputs that corresponds to a layer of hidden units with randomly
    generated components.
    The transformed values are a specified function of input activations
    that are a weighted combination of dot product (multilayer perceptron)
    and distance (rbf) activations:
    input_activation = alpha * mlp_activation + (1-alpha) * rbf_activation
    mlp_activation(x) = dot(x, weights) + bias
    rbf_activation(x) = rbf_width * ||x - center||/radius
    alpha and rbf_width are specified by the user
    weights and biases are taken from normal distribution of
    mean 0 and sd of 1
    centers are taken uniformly from the bounding hyperrectangle
    of the inputs, and radii are max(||x-c||)/sqrt(n_centers*2)
    The input activation is transformed by a transfer function that defaults
    to numpy.tanh if not specified, but can be any callable that returns an
    array of the same shape as its argument (the input activation array, of
    shape [n_samples, n_hidden]). Functions provided are 'sine', 'tanh',
    'tribas', 'inv_tribas', 'sigmoid', 'hardlim', 'softlim', 'gaussian',
    'multiquadric', 'inv_multiquadric' and 'reclinear'.
    Parameters
    ----------
    `n_hidden` : int, optional (default=20)
        Number of units to generate
    `alpha` : float, optional (default=0.5)
        Mixing coefficient for distance and dot product input activations:
        activation = alpha*mlp_activation + (1-alpha)*rbf_width*rbf_activation
    `rbf_width` : float, optional (default=1.0)
        multiplier on rbf_activation
    `user_components`: dictionary, optional (default=None)
        dictionary containing values for components that would otherwise be
        randomly generated. Valid key/value pairs are as follows:
           'radii'  : array-like of shape [n_hidden]
           'centers': array-like of shape [n_hidden, n_features]
           'biases' : array-like of shape [n_hidden]
           'weights': array-like of shape [n_features, n_hidden]
    `activation_func` : {callable, string} optional (default='tanh')
        Function used to transform input activation
        It must be one of 'tanh', 'sine', 'tribas', 'inv_tribas',
        'sigmoid', 'hardlim', 'softlim', 'gaussian', 'multiquadric',
        'inv_multiquadric', 'reclinear' or a callable. If None is given,
        'tanh' will be used.
        If a callable is given, it will be used to compute the activations.
    `activation_args` : dictionary, optional (default=None)
        Supplies keyword arguments for a callable activation_func
    `random_state`  : int, RandomState instance or None (default=None)
        Control the pseudo random number generator used to generate the
        hidden unit weights at fit time.
    Attributes
    ----------
    `input_activations_` : numpy array of shape [n_samples, n_hidden]
        Array containing dot(x, hidden_weights) + bias for all samples
    `components_` : dictionary containing two keys:
        `bias_weights_`   : numpy array of shape [n_hidden]
        `hidden_weights_` : numpy array of shape [n_features, n_hidden]
    See Also
    --------
    """
    # triangular activation function
    _tribas = (lambda x: np.clip(1.0 - np.fabs(x), 0.0, 1.0))
    # inverse triangular activation function
    _inv_tribas = (lambda x: np.clip(np.fabs(x), 0.0, 1.0))
    # sigmoid activation function
    _sigmoid = (lambda x: 1.0/(1.0 + np.exp(-x)))
    # hard limit activation function
    _hardlim = (lambda x: np.array(x > 0.0, dtype=float))
    # soft limit: clip into [0, 1]
    _softlim = (lambda x: np.clip(x, 0.0, 1.0))
    # gaussian RBF
    _gaussian = (lambda x: np.exp(-pow(x, 2.0)))
    # multiquadric RBF
    _multiquadric = (lambda x:
                     np.sqrt(1.0 + pow(x, 2.0)))
    # inverse multiquadric RBF
    _inv_multiquadric = (lambda x:
                         1.0/(np.sqrt(1.0 + pow(x, 2.0))))
    # rectified linear: max(0, x)
    _reclinear = (lambda x: np.maximum(0, x))
    # internal activation function table
    _internal_activation_funcs = {'sine': np.sin,
                                  'tanh': np.tanh,
                                  'tribas': _tribas,
                                  'inv_tribas': _inv_tribas,
                                  'sigmoid': _sigmoid,
                                  'softlim': _softlim,
                                  'hardlim': _hardlim,
                                  'gaussian': _gaussian,
                                  'multiquadric': _multiquadric,
                                  'inv_multiquadric': _inv_multiquadric,
                                  'reclinear': _reclinear
                                  }
    def __init__(self, n_hidden=20, alpha=0.5, random_state=None,
                 activation_func='tanh', activation_args=None,
                 user_components=None, rbf_width=1.0):
        super(RandomLayer, self).__init__(n_hidden=n_hidden,
                                          random_state=random_state,
                                          activation_func=activation_func,
                                          activation_args=activation_args)
        # fail fast on unknown named activation functions
        if (isinstance(self.activation_func, str)):
            func_names = self._internal_activation_funcs.keys()
            if (self.activation_func not in func_names):
                msg = "unknown activation function '%s'" % self.activation_func
                raise ValueError(msg)
        self.alpha = alpha
        self.rbf_width = rbf_width
        self.user_components = user_components
        # alpha == 0 disables the MLP term, alpha == 1 disables the RBF term
        self._use_mlp_input = (self.alpha != 0.0)
        self._use_rbf_input = (self.alpha != 1.0)
    def _get_user_components(self, key):
        """Look for given user component"""
        try:
            return self.user_components[key]
        except (TypeError, KeyError):
            # user_components is None (TypeError) or key absent (KeyError)
            return None
    def _compute_radii(self):
        """Generate RBF radii"""
        # use supplied radii if present
        radii = self._get_user_components('radii')
        # compute radii as max pairwise center distance / sqrt(2*n_centers)
        if (radii is None):
            centers = self.components_['centers']
            n_centers = centers.shape[0]
            max_dist = np.max(pairwise_distances(centers))
            radii = np.ones(n_centers) * max_dist/sqrt(2.0 * n_centers)
        self.components_['radii'] = radii
    def _compute_centers(self, X, sparse, rs):
        """Generate RBF centers"""
        # use supplied centers if present
        centers = self._get_user_components('centers')
        # use points taken uniformly from the bounding
        # hyperrectangle
        if (centers is None):
            n_features = X.shape[1]
            if (sparse):
                fxr = range(n_features)
                cols = [X.getcol(i) for i in fxr]
                min_dtype = X.dtype.type(1.0e10)
                sp_min = lambda col: np.minimum(min_dtype, np.min(col.data))  # noqa: E731
                # BUGFIX: on Python 3, np.array(map(...)) wraps the map
                # iterator in a 0-d object array; materialize it first
                min_Xs = np.array(list(map(sp_min, cols)))
                max_dtype = X.dtype.type(-1.0e10)
                sp_max = lambda col: np.maximum(max_dtype, np.max(col.data))  # noqa: E731
                max_Xs = np.array(list(map(sp_max, cols)))
            else:
                min_Xs = X.min(axis=0)
                max_Xs = X.max(axis=0)
            spans = max_Xs - min_Xs
            ctrs_size = (self.n_hidden, n_features)
            centers = min_Xs + spans * rs.uniform(0.0, 1.0, ctrs_size)
        self.components_['centers'] = centers
    def _compute_biases(self, rs):
        """Generate MLP biases"""
        # use supplied biases if present
        biases = self._get_user_components('biases')
        if (biases is None):
            b_size = self.n_hidden
            biases = rs.normal(size=b_size)
        self.components_['biases'] = biases
    def _compute_weights(self, X, rs):
        """Generate MLP weights"""
        # use supplied weights if present
        weights = self._get_user_components('weights')
        if (weights is None):
            n_features = X.shape[1]
            hw_size = (n_features, self.n_hidden)
            weights = rs.normal(size=hw_size)
        self.components_['weights'] = weights
    def _generate_components(self, X):
        """Generate components of hidden layer given X"""
        rs = check_random_state(self.random_state)
        if (self._use_mlp_input):
            self._compute_biases(rs)
            self._compute_weights(X, rs)
        if (self._use_rbf_input):
            self._compute_centers(X, sp.issparse(X), rs)
            self._compute_radii()
    def _compute_input_activations(self, X):
        """Compute input activations given X"""
        n_samples = X.shape[0]
        mlp_acts = np.zeros((n_samples, self.n_hidden))
        if (self._use_mlp_input):
            b = self.components_['biases']
            w = self.components_['weights']
            mlp_acts = self.alpha * (safe_sparse_dot(X, w) + b)
        rbf_acts = np.zeros((n_samples, self.n_hidden))
        if (self._use_rbf_input):
            radii = self.components_['radii']
            centers = self.components_['centers']
            scale = self.rbf_width * (1.0 - self.alpha)
            rbf_acts = scale * cdist(X, centers)/radii
        self.input_activations_ = mlp_acts + rbf_acts
class MLPRandomLayer(RandomLayer):
"""Wrapper for RandomLayer with alpha (mixing coefficient) set
to 1.0 for | |
<filename>nodes/cozmo_driver.py
#!/usr/bin/python3.5
# -*- encoding: utf-8 -*-
"""
This file implements an ANKI Cozmo ROS driver.
It wraps several functionalities of the Cozmo SDK, including the
camera and motors. As some core ROS parts are not Python 3.5
compatible, the well-known "transformations.py" is shipped next
to this node. The TransformBroadcaster is likewise adapted from
the ROS tf package.
Copyright {2016} {<NAME>}
Copyright {2017} {<NAME>}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# system
import sys
import numpy as np
from copy import deepcopy
# cozmo SDK
import cozmo
from cozmo.util import radians
# ROS
import rospy
from transformations import quaternion_from_euler
from camera_info_manager import CameraInfoManager
# ROS msgs
from tf2_msgs.msg import TFMessage
from nav_msgs.msg import Odometry
from geometry_msgs.msg import (
Twist,
TransformStamped
)
from std_msgs.msg import (
String,
Float64,
ColorRGBA,
)
from sensor_msgs.msg import (
Image,
CameraInfo,
BatteryState,
Imu,
JointState,
)
from cozmo.util import distance_mm, speed_mmps
# reused as original is not Python3 compatible
class TransformBroadcaster(object):
    """
    :class:`TransformBroadcaster` is a convenient way to send transformation updates on the ``"/tf"`` message topic.
    """
    def __init__(self, queue_size=100):
        # one shared publisher used by both send methods
        self.pub_tf = rospy.Publisher("/tf", TFMessage, queue_size=queue_size)
    def send_transform(self, translation, rotation, time, child, parent):
        """
        :param translation: the translation of the transformation as a tuple (x, y, z)
        :param rotation: the rotation of the transformation as a tuple (x, y, z, w)
        :param time: the time of the transformation, as a rospy.Time()
        :param child: child frame in tf, string
        :param parent: parent frame in tf, string
        Broadcast the transformation from tf frame child to parent on ROS topic ``"/tf"``.
        """
        msg = TransformStamped()
        msg.header.stamp = time
        msg.header.frame_id = parent
        msg.child_frame_id = child
        (msg.transform.translation.x,
         msg.transform.translation.y,
         msg.transform.translation.z) = translation[0], translation[1], translation[2]
        (msg.transform.rotation.x,
         msg.transform.rotation.y,
         msg.transform.rotation.z,
         msg.transform.rotation.w) = rotation[0], rotation[1], rotation[2], rotation[3]
        self.send_transform_message(msg)
    def send_transform_message(self, transform):
        """
        :param transform: geometry_msgs.msg.TransformStamped
        Broadcast the transformation from tf frame child to parent on ROS topic ``"/tf"``.
        """
        self.pub_tf.publish(TFMessage([transform]))
class CozmoRos(object):
    """
    The Cozmo ROS driver object.

    Bridges a Cozmo SDK robot handle to ROS: publishes odometry, IMU,
    battery, camera and joint-state data, and subscribes to motion,
    LED, head/lift and text-to-speech command topics.
    """
    def __init__(self, coz):
        """
        :type coz: cozmo.Robot
        :param coz: The cozmo SDK robot handle (object).
        """
        # vars
        self._cozmo = coz
        # measured velocities, updated from pose deltas (see _publish_tf)
        self._lin_vel = .0
        self._ang_vel = .0
        # last commanded velocities, set by _twist_callback
        self._cmd_lin_vel = .0
        self._cmd_ang_vel = .0
        self._last_pose = self._cozmo.pose
        self._wheel_vel = (0, 0)
        # camera optical frame rotation; NOTE(review): presumably the usual
        # ROS optical-frame convention -- confirm against the tf tree
        self._optical_frame_orientation = quaternion_from_euler(-np.pi/2., .0, -np.pi/2.)
        self._camera_info_manager = CameraInfoManager('cozmo_camera', namespace='/cozmo_camera')
        # tf
        self._tfb = TransformBroadcaster()
        # params
        self._odom_frame = rospy.get_param('~odom_frame', 'odom')
        self._footprint_frame = rospy.get_param('~footprint_frame', 'base_footprint')
        self._base_frame = rospy.get_param('~base_frame', 'base_link')
        self._head_frame = rospy.get_param('~head_frame', 'head_link')
        self._camera_frame = rospy.get_param('~camera_frame', 'camera_link')
        self._camera_optical_frame = rospy.get_param('~camera_optical_frame', 'cozmo_camera')
        camera_info_url = rospy.get_param('~camera_info_url', '')
        # pubs
        self._joint_state_pub = rospy.Publisher('joint_states', JointState, queue_size=1)
        self._odom_pub = rospy.Publisher('odom', Odometry, queue_size=1)
        self._imu_pub = rospy.Publisher('imu', Imu, queue_size=1)
        self._battery_pub = rospy.Publisher('battery', BatteryState, queue_size=1)
        # Note: camera is published under global topic (preceding "/")
        self._image_pub = rospy.Publisher('/cozmo_camera/image', Image, queue_size=10)
        self._camera_info_pub = rospy.Publisher('/cozmo_camera/camera_info', CameraInfo, queue_size=10)
        # subs
        self._backpack_led_sub = rospy.Subscriber(
            'backpack_led', ColorRGBA, self._set_backpack_led, queue_size=1)
        # NOTE(review): cmd_vel is currently routed to the blocking
        # _drive_straight handler instead of the differential-drive
        # _twist_callback -- presumably a temporary experiment; confirm.
        #self._twist_sub = rospy.Subscriber('cmd_vel', Twist, self._twist_callback, queue_size=1)
        self._twist_sub = rospy.Subscriber('cmd_vel', Twist, self._drive_straight, queue_size=1)
        self._say_sub = rospy.Subscriber('say', String, self._say_callback, queue_size=1)
        self._head_sub = rospy.Subscriber('head_angle', Float64, self._move_head, queue_size=1)
        self._lift_sub = rospy.Subscriber('lift_height', Float64, self._move_lift, queue_size=1)
        # camera info manager
        self._camera_info_manager.setURL(camera_info_url)
        self._camera_info_manager.loadCameraInfo()
    def _drive_straight(self, cmd):
        """
        Drive a fixed 150 mm straight segment at the commanded speed.
        :type cmd: Twist
        :param cmd: only ``cmd.linear.x`` is used; it is passed straight to
            ``speed_mmps`` so it is interpreted in mm/s -- TODO confirm units.
        """
        # wait_for_completed() blocks this subscriber callback until done
        action = self._cozmo.drive_straight(distance_mm(150), speed_mmps(cmd.linear.x))
        action.wait_for_completed()
    def _move_head(self, cmd):
        """
        Move head to given angle.
        :type cmd: Float64
        :param cmd: The message containing angle in degrees. [-25 - 44.5]

        NOTE(review): the SDK call below is commented out, so this callback
        is currently a no-op.
        """
        #action = self._cozmo.set_head_angle(radians(cmd.data * np.pi / 180.), duration=0.1,
        #in_parallel=True)
        #action.wait_for_completed()
    def _move_lift(self, cmd):
        """
        Move lift to given height.
        :type cmd: Float64
        :param cmd: A value between [0 - 1], the SDK auto
            scales it to the according height.

        NOTE(review): the SDK call below is commented out, so this callback
        is currently a no-op.
        """
        #action = self._cozmo.set_lift_height(height=cmd.data,
        #duration=0.2, in_parallel=True)
        #action.wait_for_completed()
def _set_backpack_led(self, msg):
"""
Set the color of the backpack LEDs.
:type msg: ColorRGBA
:param msg: The color to be set.
"""
# setup color as integer values
color = [int(x * 255) for x in [msg.r, msg.g, msg.b, msg.a]]
# create lights object with duration
light = cozmo.lights.Light(cozmo.lights.Color(rgba=color), on_period_ms=1000)
# set lights
self._cozmo.set_all_backpack_lights(light)
def _twist_callback(self, cmd):
"""
Set commanded velocities from Twist message.
The commands are actually send/set during run loop, so delay
is in worst case up to 1 / update_rate seconds.
:type cmd: Twist
:param cmd: The commanded velocities.
"""
# compute differential wheel speed
axle_length = 0.07 # 7cm
self._cmd_lin_vel = cmd.linear.x
self._cmd_ang_vel = cmd.angular.z
rv = self._cmd_lin_vel + (self._cmd_ang_vel * axle_length * 0.5)
lv = self._cmd_lin_vel - (self._cmd_ang_vel * axle_length * 0.5)
self._wheel_vel = (lv*1000., rv*1000.) # convert to mm / s
    def _say_callback(self, msg):
        """
        The callback for incoming text messages to be said.
        :type msg: String
        :param msg: The text message to say.
        """
        # blocks this subscriber callback until the robot finished speaking
        self._cozmo.say_text(msg.data).wait_for_completed()
def _publish_objects(self):
"""
Publish detected object as transforms between odom_frame and object_frame.
"""
for obj in self._cozmo.world.visible_objects:
now = rospy.Time.now()
x = obj.pose.position.x * 0.001
y = obj.pose.position.y * 0.001
z = obj.pose.position.z * 0.001
q = (obj.pose.rotation.q1, obj.pose.rotation.q2, obj.pose.rotation.q3, obj.pose.rotation.q0)
self._tfb.send_transform(
(x, y, z), q, now, 'cube_' + str(obj.object_id), self._odom_frame
)
def _publish_image(self):
"""
Publish latest camera image as Image with CameraInfo.
"""
# only publish if we have a subscriber
if self._image_pub.get_num_connections() == 0:
return
# get latest image from cozmo's camera
camera_image = self._cozmo.world.latest_image
if camera_image is not None:
# convert image to gray scale as it is gray although
img = camera_image.raw_image.convert('L')
ros_img = Image()
ros_img.encoding = 'mono8'
ros_img.width = img.size[0]
ros_img.height = img.size[1]
ros_img.step = ros_img.width
ros_img.data = img.tobytes()
ros_img.header.frame_id = 'cozmo_camera'
cozmo_time = camera_image.image_recv_time
ros_img.header.stamp = rospy.Time.from_sec(cozmo_time)
# publish images and camera info
self._image_pub.publish(ros_img)
camera_info = self._camera_info_manager.getCameraInfo()
camera_info.header = ros_img.header
self._camera_info_pub.publish(camera_info)
def _publish_joint_state(self):
"""
Publish joint states as JointStates.
"""
# only publish if we have a subscriber
if self._joint_state_pub.get_num_connections() == 0:
return
js = JointState()
js.header.stamp = rospy.Time.now()
js.header.frame_id = 'cozmo'
js.name = ['head', 'lift']
js.position = [self._cozmo.head_angle.radians,
self._cozmo.lift_height.distance_mm * 0.001]
js.velocity = [0.0, 0.0]
js.effort = [0.0, 0.0]
self._joint_state_pub.publish(js)
def _publish_imu(self):
"""
Publish inertia data as Imu message.
"""
# only publish if we have a subscriber
if self._imu_pub.get_num_connections() == 0:
return
imu = Imu()
imu.header.stamp = rospy.Time.now()
imu.header.frame_id = self._base_frame
imu.orientation.w = self._cozmo.pose.rotation.q0
imu.orientation.x = self._cozmo.pose.rotation.q1
imu.orientation.y = self._cozmo.pose.rotation.q2
imu.orientation.z = self._cozmo.pose.rotation.q3
imu.angular_velocity.x = self._cozmo.gyro.x
imu.angular_velocity.y = self._cozmo.gyro.y
imu.angular_velocity.z = self._cozmo.gyro.z
imu.linear_acceleration.x = self._cozmo.accelerometer.x * 0.001
imu.linear_acceleration.y = self._cozmo.accelerometer.y * 0.001
imu.linear_acceleration.z = self._cozmo.accelerometer.z * 0.001
self._imu_pub.publish(imu)
def _publish_battery(self):
"""
Publish battery as BatteryState message.
"""
# only publish if we have a subscriber
if self._battery_pub.get_num_connections() == 0:
return
battery = BatteryState()
battery.header.stamp = rospy.Time.now()
battery.voltage = self._cozmo.battery_voltage
battery.present = True
if self._cozmo.is_on_charger: # is_charging always return False
battery.power_supply_status = BatteryState.POWER_SUPPLY_STATUS_CHARGING
else:
battery.power_supply_status = BatteryState.POWER_SUPPLY_STATUS_NOT_CHARGING
self._battery_pub.publish(battery)
def _publish_odometry(self):
"""
Publish current pose as Odometry message.
"""
# only publish if we have a subscriber
if self._odom_pub.get_num_connections() == 0:
return
now = rospy.Time.now()
odom = Odometry()
odom.header.frame_id = self._odom_frame
odom.header.stamp = now
odom.child_frame_id = self._footprint_frame
odom.pose.pose.position.x = self._cozmo.pose.position.x * 0.001
odom.pose.pose.position.y = self._cozmo.pose.position.y * 0.001
odom.pose.pose.position.z = self._cozmo.pose.position.z * 0.001
q = quaternion_from_euler(.0, .0, self._cozmo.pose_angle.radians)
odom.pose.pose.orientation.x = q[0]
odom.pose.pose.orientation.y = q[1]
odom.pose.pose.orientation.z = q[2]
odom.pose.pose.orientation.w = q[3]
odom.pose.covariance = np.diag([1e-2, 1e-2, 1e-2, 1e3, 1e3, 1e-1]).ravel()
odom.twist.twist.linear.x = self._lin_vel
odom.twist.twist.angular.z = self._ang_vel
odom.twist.covariance = np.diag([1e-2, 1e3, 1e3, 1e3, 1e3, 1e-2]).ravel()
self._odom_pub.publish(odom)
def _publish_tf(self, update_rate):
"""
Broadcast current transformations and update
measured velocities for odometry twist.
Published transforms:
odom_frame -> footprint_frame
footprint_frame -> base_frame
base_frame -> head_frame
head_frame -> camera_frame
camera_frame -> camera_optical_frame
"""
now = rospy.Time.now()
x = self._cozmo.pose.position.x * 0.001
y = self._cozmo.pose.position.y * 0.001
z = self._cozmo.pose.position.z * 0.001
# compute current linear and angular velocity from pose change
# Note: Sign for linear velocity is taken from commanded velocities!
# Note: The angular velocity can also be taken from gyroscopes!
delta_pose = self._last_pose - self._cozmo.pose
dist = np.sqrt(delta_pose.position.x**2
+ delta_pose.position.y**2
+ delta_pose.position.z**2) / 1000.0
self._lin_vel = dist * update_rate * np.sign(self._cmd_lin_vel)
self._ang_vel = -delta_pose.rotation.angle_z.radians * update_rate
# publish odom_frame -> footprint_frame
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: <NAME>
# PRPE technical version 6
#=============================================================================
import sys
from collections import Counter
from copy import deepcopy
from numpy import argmin
# global accumulators for accepted / rejected morpheme candidates
goodroots = Counter()
badroots = {}
goodprefixes = Counter()
badprefixes = {}
goodpostfixes = Counter()
badpostfixes = {}
# segmentation limits, in characters
premaxlen = 8
postmaxlen = 7
minrootlen = 2
minpreflen = 2
def isUlower(word):
    """True when *word* is capitalized: an upper-case first letter followed
    by one or more lower-case letters."""
    if len(word) < 2:
        return False
    return word[0].isupper() and word[1:].islower()
def processUlower(word):
    """Lower-case *word* when it is a capitalized word; otherwise return it
    unchanged."""
    # inlined isUlower() test: upper-case head + all-lower tail
    if len(word) >= 2 and word[0].isupper() and word[1:].islower():
        return word.lower()
    return word
def search_codetree(tword, codetree):
    """Look up *tword* in *codetree* and return the value stored at its
    terminal node, or 0 when the word is absent.

    A codetree node maps one symbol to a ``[value, children]`` pair.
    """
    # guard: the original indexed tword[0] unconditionally and raised
    # IndexError on an empty key; an empty word is simply absent
    if not tword:
        return 0
    pos = 0
    while True:
        s = tword[pos]
        if s not in codetree:
            return 0
        if pos == len(tword) - 1:
            return codetree[s][0]
        pos += 1
        codetree = codetree[s][1]
def search_codetree_hasleftsub(tword, codetree):
    """Return the value stored at the first node on *tword*'s path that has
    a non-zero value (i.e. the shortest stored prefix), or 0 when neither a
    prefix nor the word itself is stored.
    """
    # guard: avoid the IndexError the original raised on an empty key
    if not tword:
        return 0
    pos = 0
    while True:
        s = tword[pos]
        if s not in codetree:
            return 0
        if codetree[s][0] > 0:
            return codetree[s][0]
        if pos == len(tword) - 1:
            return 0
        pos += 1
        codetree = codetree[s][1]
def search_codetree_isleftsub(tword, codetree):
    """Return 1 when *tword* can be fully walked inside *codetree* (it is a
    left substring of some stored entry), else 0.
    """
    # guard: the original raised IndexError on an empty key; treat the
    # empty word as not found
    if not tword:
        return 0
    pos = 0
    while True:
        s = tword[pos]
        if s not in codetree:
            return 0
        if pos == len(tword) - 1:
            return 1
        pos += 1
        codetree = codetree[s][1]
def add_to_codetree(tword, codetree, freq=1):
    """Insert *tword* into the prefix tree, adding *freq* to the value of
    every node along its path; return the number of newly created nodes."""
    created = 0
    for symbol in tword:
        node = codetree.get(symbol)
        if node is None:
            node = codetree[symbol] = [0, {}]
            created += 1
        node[0] += freq
        codetree = node[1]
    return created
def add_to_vocab_multi(word, vocab, freq):
    """Add *freq* to every prefix of *word* in *vocab*, stopping at the
    first non-alphabetic character."""
    for end, ch in enumerate(word, start=1):
        if not ch.isalpha():
            return
        vocab[word[:end]] += freq
def add_to_vocab_multi_reverse(word, vocab, postmaxlen, minrootlen, freq):
    """Add *freq* to the prefixes of the (already reversed) *word*, i.e. to
    the word's endings, keeping at most *postmaxlen* characters and leaving
    at least *minrootlen* characters for the root."""
    limit = min(len(word) - minrootlen, postmaxlen)
    for end in range(1, limit + 1):
        vocab[word[:end]] += freq
def add_to_codetree_terminal(tword, codetree, freq=1):
    """Insert *tword* into the prefix tree, storing *freq* only at the
    terminal node; intermediate nodes are created with value 0."""
    last = len(tword) - 1
    for pos, symbol in enumerate(tword):
        if symbol not in codetree:
            codetree[symbol] = [0, {}]
        if pos == last:
            codetree[symbol][0] = freq
        else:
            codetree = codetree[symbol][1]
def read_codetree(datafile, reverse=False):
    """Build a terminal-valued code tree from *datafile*.

    Each line has the form ``word [count]``; the count defaults to 1.  When
    *reverse* is set the word is reversed before insertion (used for the
    postfix tree).  Blank lines are skipped (the original raised IndexError
    on them).
    """
    codetree = {}
    for line in datafile:
        item = line.split()
        if not item:
            continue  # tolerate blank / whitespace-only lines
        word = item[0]
        if reverse:
            word = word[::-1]
        num = int(item[1]) if len(item) > 1 else 1
        add_to_codetree_terminal(word, codetree, num)
    return codetree
def read_vocabulary(vocabfile, reverse=False):
    """Read a ``word [count]`` vocabulary file into a Counter.

    Missing counts are replaced by a large, per-line decreasing pseudo
    frequency so that earlier lines rank higher.  Words are reversed when
    *reverse* is set.  Blank lines are skipped (the original raised
    IndexError on them).
    """
    vocab = Counter()
    rank = 999999999
    for line in vocabfile:
        item = line.split()
        if not item:
            continue  # tolerate blank / whitespace-only lines
        word = item[0]
        if reverse:
            word = word[::-1]
        if len(item) > 1:
            vocab[word] = int(item[1])
        else:
            vocab[word] = rank
            rank -= 1
    return vocab
def extract_vocabulary(infile):
    """Count word frequencies in *infile*, lower-casing capitalized words
    via processUlower first."""
    vocab = Counter()
    for line in infile:
        # word = processMeta(word)  # kept from the original for reference
        for token in line.split():
            vocab[processUlower(token)] += 1
    return vocab
def save_vocabulary(vocabfile, vocab, order=False, reverseorder=True, alphaonly=False, maxcount=None, vocabout=None):
    """Write ``word count`` lines of *vocab* to *vocabfile*.

    order       -- sort by count first (descending when *reverseorder*)
    alphaonly   -- skip words with non-alphabetic characters
    maxcount    -- stop after writing this many entries
    vocabout    -- optional dict that also receives every written pair

    The two branches of the original differed only in the iteration order;
    they are merged into a single loop with identical behavior.
    """
    if order:
        items = sorted(vocab.items(), key=lambda x: x[1], reverse=reverseorder)
    else:
        items = vocab.items()
    cnt = 0
    for word, num in items:
        if maxcount is not None and cnt == maxcount:
            return
        if not alphaonly or word.isalpha():
            vocabfile.write(u"{0} {1}\n".format(word, num))
            if vocabout is not None:
                vocabout[word] = num
            cnt += 1
def _rank_into_codetree(rawvocab, codetree, outfile, freqnotrank):
    """Rank *rawvocab* entries by descending frequency and store the rank
    (or the raw frequency when *freqnotrank*) as the entry's terminal value
    in *codetree*; mirror each entry to *outfile* when one is given."""
    prevfreq = -1
    num = 0
    for word, freq in sorted(rawvocab.items(), key=lambda x: x[1], reverse=True):
        if freqnotrank:
            num = freq
        elif freq != prevfreq:
            num += 1
        add_to_codetree_terminal(word, codetree, num)
        if outfile:
            outfile.write(" {0} {1}\n".format(word, num))
        prevfreq = freq
def register_subwords(infile, premaxlen, postmaxlen, minrootlen, isvocabin=False, vocabout=None, rawprefixfile=None, rawpostfixfile=None, loadrawfile=False, freqnotrank=False):
    """Collect candidate prefix/postfix statistics from a corpus.

    infile      -- corpus text (or a vocabulary file when *isvocabin*)
    premaxlen   -- maximum prefix length considered
    postmaxlen  -- maximum postfix length considered
    minrootlen  -- minimum characters reserved for the root
    vocabout    -- optional file to save the extracted vocabulary to
    rawprefixfile / rawpostfixfile -- files to save (or, with *loadrawfile*,
                   load) the raw prefix/postfix counts
    freqnotrank -- store raw frequencies instead of frequency ranks

    Returns (prefix codetree, postfix codetree, vocabulary, raw prefix
    vocabulary).  The original duplicated the ranking loop for prefixes and
    postfixes; both now share _rank_into_codetree with identical behavior.
    """
    rawprecodetree = {}
    rawpostcodetree = {}
    if isvocabin:
        vocab = read_vocabulary(infile)
    else:
        vocab = extract_vocabulary(infile)
    if loadrawfile:
        rawprevocab = read_vocabulary(rawprefixfile)
        rawpostvocab = read_vocabulary(rawpostfixfile)
    else:
        rawprevocab = Counter()
        rawpostvocab = Counter()
        for word, freq in vocab.items():
            # left-anchored prefixes (bounded by premaxlen) ...
            add_to_vocab_multi(word[:premaxlen], rawprevocab, freq)
            # ... and right-anchored postfixes, counted on the reversed word
            add_to_vocab_multi_reverse(word[::-1], rawpostvocab, postmaxlen, minrootlen, freq)
    _rank_into_codetree(rawprevocab, rawprecodetree,
                        None if loadrawfile else rawprefixfile, freqnotrank)
    _rank_into_codetree(rawpostvocab, rawpostcodetree,
                        None if loadrawfile else rawpostfixfile, freqnotrank)
    if vocabout is not None:
        save_vocabulary(vocabout, vocab, True)
    return rawprecodetree, rawpostcodetree, vocab, rawprevocab
def print_subwords(infile, codefile, n, reverse=False):
    """Count the distinct left (or right, when *reverse*) n-grams of the
    words in *infile*, print how many there are, and write them with their
    counts to *codefile*, most frequent first."""
    ngrams = Counter()
    for word in extract_vocabulary(infile).keys():
        if reverse:
            # right n-gram; require at least one extra leading character
            if len(word) >= n + 1:
                ngrams[word[-n:]] += 1
        elif len(word) >= n:
            ngrams[word[:n]] += 1
    print(len(ngrams))
    for gram, freq in sorted(ngrams.items(), key=lambda x: x[1], reverse=True):
        codefile.write("{0} {1}\n".format(gram, freq))
def add_subwords(codetree, tword, pos, subgraph):
    """Record in subgraph[pos] every stored prefix of *tword* that starts at
    *pos*: an edge start -> end-position carrying the highest stored value."""
    start = pos
    while pos < len(tword):
        s = tword[pos]
        if s not in codetree:
            return
        value, children = codetree[s]
        if value > 0:
            end = pos + 1
            subgraph[start][end] = max(subgraph[start].get(end, 0), value)
        pos += 1
        codetree = children
def add_subwords_reverse(codetree, tword, pos, subgraph):
    """Record stored postfixes of *tword* ending at *pos*: walking the
    reversed-key codetree leftwards adds edges left-position -> pos.
    Stops before position 0, so a postfix never consumes the whole word."""
    right = pos
    while pos >= 2:
        s = tword[pos - 1]
        if s not in codetree:
            return
        value, children = codetree[s]
        if value > 0:
            left = pos - 1
            subgraph[left][right] = max(subgraph[left].get(right, 0), value)
        pos -= 1
        codetree = children
def create_subgraph(precodetree, postcodetree, tword):
    """Build the segmentation graph of *tword*: one dict of outgoing edges
    per character position, filled with stored prefixes from every start
    position and with stored postfixes ending at the word end."""
    n = len(tword)
    subgraph = [dict() for _ in range(n)]
    for start in range(n - 1):
        add_subwords(precodetree, tword, start, subgraph)
    # for pos in range(len(subgraph),0,-1):
    #     add_subwords_reverse(postcodetree,tword,pos,subgraph)
    add_subwords_reverse(postcodetree, tword, n, subgraph)
    return subgraph
def analyze_subgraph(subgraph, word, track="", pos=0, freq="", leng=0):
    """Recursively enumerate segmentations of *word* through *subgraph*,
    printing every complete path of at most 3 parts as ``a-b-c freqs``."""
    if pos == len(word):
        if leng <= 3:
            print(track, freq)
        return
    if track:
        # separate parts with "-" and their frequencies with spaces
        track += "-"
        freq += " "
    for nxt, nxtfreq in subgraph[pos].items():
        analyze_subgraph(subgraph, word, track + word[pos:nxt], nxt,
                         freq + str(nxtfreq), leng + 1)
# === Generic heuristics BEGIN
nonprefixes_dict = {}  # default; rebound per-language by add_heuristics()
# vowel inventory (includes Latvian long vowels); a dict gives O(1) lookups
vowels = u"aāeēiīoōuūy"
vowdict = {v: 1 for v in vowels}
def containsvowel(word):
    """True when *word* contains at least one vowel."""
    return any(s in vowdict for s in word)
def is_good_part_generic(part, word=''):
    """Language-independent sanity check for a morpheme candidate: purely
    alphabetic, all lower-case, and containing at least one vowel."""
    return part.isalpha() and part.islower() and containsvowel(part)
# === Generic heuristics END
# === English specific heuristics BEGIN
# prefixes that should not be split off in English
nonprefixes_en = ["non","un","im"]
nonprefixes_dict_en={}
for v in nonprefixes_en:
    nonprefixes_dict_en[v]=1
def is_good_root_en(part, word):
    """An English root must be longer than two characters and pass the
    generic morpheme check."""
    if len(part) <= 2:
        return False
    return is_good_part_generic(part)
def is_good_postfix_en(part):
    """Heuristic filter for English postfix candidates."""
    n = len(part)
    if n <= 2:
        # very short postfixes must be known endings or "ly"
        return is_good_ending_en(part) or part == "ly"
    if n > 5:
        return False
    # explicitly whitelisted derivational suffixes
    if part in ("ment", "ling", "ness"):
        return True
    if not is_good_part_generic(part):
        return False
    # remaining mid-length postfixes must start with a vowel
    return part[0] in vowdict
def is_good_ending_en(part):
    """True for the closed set of frequent English inflectional endings."""
    return part in ("s", "ed", "e", "y", "es", "er", "ies")
def is_good_prefix_en(part):
    """English prefixes need only pass the generic morpheme check."""
    return is_good_part_generic(part)
# === English specific heuristics END
# === Latvian specific heuristics BEGIN
# prefixes that should not be split off in Latvian
nonprefixes_lv = ["ne"]
nonprefixes_dict_lv={}
for v in nonprefixes_lv:
    nonprefixes_dict_lv[v]=1
# vowels except "o" -- "o" gets special treatment in is_good_root_lv
vowels_not_o=u"aāeēiīōuūy";
vowdict_not_o={}
for v in vowels_not_o:
    vowdict_not_o[v]=1
# consonants rejected as a root start when followed by a consonant
badrootstart_lv = "cčjlļmnņr"
badrootstart_dict_lv={}
for v in badrootstart_lv:
    badrootstart_dict_lv[v]=1
# forbidden two-character root endings
badrootend_lv = ["šs"]
badrootend_dict_lv={}
for v in badrootend_lv:
    badrootend_dict_lv[v]=1
def is_good_root_lv(root,word):
    """Heuristic: is *root* a plausible Latvian root of *word*?

    Assumes len(root) >= 2 (callers enforce minrootlen=2); shorter input
    would raise IndexError on the root[1]/root[-2] checks -- TODO confirm.
    """
    # if len(root)<=2: return False
    # roots may not end in a vowel (except a long-enough "o" ending)
    if root[-1] in vowdict_not_o: return False
    if root[-1] == "o" and len(root)<4: return False
    # p/t before an unexpected final consonant is rejected
    if root[-2] in ['p','t'] and root[-1] not in ['l','r','j','n','t','s','o']: return False
    # a root covering the whole word must be at least 4 characters
    if len(root)==len(word) and len(root)<4: return False
    # some consonants may not start a root when followed by a consonant
    if root[1] not in vowdict and root[0] in badrootstart_dict_lv: return False
    if root[-2:] in badrootend_dict_lv: return False
    return is_good_part_generic(root)
def is_good_postfix_lv(part):
    """Heuristic filter for Latvian postfix candidates.

    Single characters must be vowels or one of t/s/š; longer candidates must
    pass the generic check and end acceptably; several reflexive and
    derivational suffixes are whitelisted explicitly.
    """
    if len(part)==1:
        if part in vowdict: return True
        elif part in ["t","s","š"]: return True
        else: return False
    else:
        if not is_good_part_generic(part):
            return False
        # must end in a vowel or one of the inflectional consonants
        if part[-1] not in vowdict and part[-1] not in ["m","s","š","t"]: return False
        if len(part)==2:
            # postfixes of length 2 should contain vowel at position 0 (LATVIAN?)
            if part[0] not in vowdict or part[-1]=="o":
                return False
        else: # postfix length 3 or more
            # explicitly whitelisted reflexive/derivational suffixes
            if part=="sies": return True
            if part=="ties": return True
            if part[:3]=="šan": return True
            if part[:3]=="nīc": return True
            if part[:4]=="niek": return True
            if part[:4]=="niec": return True
            if part[:4]=="nieč": return True
            # longer postfixes must start with a vowel
            if not containsvowel(part[0]):
                return False
        return True
def is_good_ending_lv(part):
    """Is *part* an ending in Latvian, assuming it already passed the
    postfix check?  Endings are at most 4 characters."""
    n = len(part)
    if n > 4:
        return False
    if n == 4:
        return part in ("sies", "ties")
    if n == 3:
        return part in ("iem", "ies", "ais")
    if n == 2:
        if part[-1] == "š":
            return False
        if part[0] in vowdict and part[1] in vowdict:
            # only a few vowel pairs are valid endings
            return part in ("ai", "ie", "ei")
        if part in ("om", "ūs", "et", "ut", "ūt"):
            return False
        return True
    # length 1 (or empty): always acceptable
    return True
def is_good_prefix_lv(part):
    """Latvian prefixes need only pass the generic morpheme check."""
    return is_good_part_generic(part)
# === Latvian specific heuristics END
def add_heuristics(lang=''):
    """Select the language-specific heuristics by rebinding the module-level
    is_good_* hooks and nonprefixes_dict.

    Unknown languages fall back to the English heuristics and are reported
    as 'unspecified'.  The original duplicated the English assignments in
    the 'en' and fallback branches; they are merged here.
    """
    lang = lang.lower()
    global is_good_prefix
    global is_good_root
    global is_good_postfix
    global is_good_ending
    global nonprefixes_dict
    if lang=='lv':
        is_good_prefix = is_good_prefix_lv
        is_good_root = is_good_root_lv
        is_good_postfix = is_good_postfix_lv
        is_good_ending = is_good_ending_lv
        nonprefixes_dict = nonprefixes_dict_lv
    else:
        if lang != 'en':
            lang = 'unspecified'
        is_good_prefix = is_good_prefix_en
        is_good_root = is_good_root_en
        is_good_postfix = is_good_postfix_en
        is_good_ending = is_good_ending_en
        nonprefixes_dict = nonprefixes_dict_en
    sys.stderr.write('Heuristics: {0}\n'.format(lang))
def analyze_prefixes(prefsource,rootsource,vocab,rawprevocab,preffile=None,loadfile=False):
""" Collect candidate prefixes
"""
prefixes = Counter()
if loadfile:
if preffile is not None:
for line in preffile:
entry = line.split()
prefixes[entry[0]] = int(entry[1])
| |
event_pair in event_matches.items():
edges = self.graph.get_edges(event)
new_edges = []
for edge in edges:
(vertex, type) = edge
new_edges.append((event_matches[vertex], type))
new_edges = set(new_edges)
edges_pair = set(other.graph.get_edges(event_pair))
if new_edges != edges_pair:
return False
return True
def __str__(self):
res = str(self.graph)
res += "\nlabels:\n "
for k in self.labels:
res += "\t" + str(k) + " " + self.labels[k] + "\n"
return res
class TracesList:
    """Loads execution traces from the trace directory and keeps both the
    full list and a de-duplicated list."""
    DIR = 'traces'
    def __init__(self):
        self.executions = []
        self.unique_executions = []
        # only regular files, in deterministic (sorted) order
        self._files = sorted(
            entry for entry in os.listdir(self.DIR)
            if os.path.isfile(os.path.join(self.DIR, entry))
        )
    @property
    def get_number_executions(self):
        """Number of traces read so far."""
        return len(self.executions)
    def populate_list(self, maximum_traces=None, unique=True, shuffle=False):
        """Read up to *maximum_traces* trace files; optionally de-duplicate
        and shuffle the resulting lists."""
        for index, filename in enumerate(self._files):
            if maximum_traces and index >= int(maximum_traces):
                break
            trace = Trace()
            trace.get_execution(os.path.join(self.DIR, filename))
            self.executions.append(trace)
        print(str(self.get_number_executions) + " traces read")
        if unique:
            self.remove_duplicates()
        if shuffle:
            random.shuffle(self.executions)
            random.shuffle(self.unique_executions)
    @property
    def get_number_unique_executions(self):
        """Number of traces kept after de-duplication."""
        return len(self.unique_executions)
    def print_files(self):
        """Print every discovered trace file name."""
        for filename in self._files:
            print(filename)
    def remove_duplicates(self):
        """Fill unique_executions, keeping the last occurrence of each
        distinct trace (pairwise equality scan)."""
        fenced = []
        for index, candidate in enumerate(self.executions):
            if any(candidate == other for other in self.executions[index + 1:]):
                continue
            self.unique_executions.append(candidate)
            if candidate.has_fences:
                fenced.append(candidate)
        print(str(len(self.unique_executions)) + " unique disallowed traces")
        print(str(len(fenced)) + " unique allowed traces")
class HardwareTests:
    """
    Contains the tests for the actual hardware
    """
    HARDWARE_TESTS_FOLDER = "litmus_tests"
    HARDWARE_CPU_THREADS = 4
    HARDWARE_FPGA_THREADS = 1

    def __init__(self, executions):
        self.executions = executions
        self.create_folder(self.HARDWARE_TESTS_FOLDER)

    @staticmethod
    def create_folder(path):
        """Create directory `path`; an existing directory is not an error."""
        try:
            os.mkdir(path)
        except OSError:
            # Already exists (or not creatable) -- keep going silently.
            pass
        else:
            print("Successfully created the directory %s " % path)

    def create_all_traces(self, remove_fences=False):
        """Generate a hardware test for every execution that fits the
        hardware limits; returns a list of (folder, test_name) tuples."""
        software_executables = []
        for number, ex in enumerate(self.executions):
            if remove_fences and not ex.has_fences:
                continue
            # Skip executions that exceed the encodable thread, channel or
            # location counts of the target hardware.
            if ex.get_number_cpu_threads > self.HARDWARE_CPU_THREADS or \
                    ex.get_number_fpga_threads > self.HARDWARE_FPGA_THREADS or \
                    ex.get_number_channels >= len(CHANNEL_ENCODING)-1 or \
                    ex.get_number_locations > len(LOCATION_ENCODING):
                continue
            software_executables.append(self.create_hardware_trace(number, ex))
        return software_executables

    def create_hardware_trace(self, number, ex):
        """Instantiate the software test templates for execution `ex` under
        <HARDWARE_TESTS_FOLDER>/test/sw_<number>; returns (folder, name)."""
        # Create folder structure
        test_name = "test_" + str(number)
        folder = self.HARDWARE_TESTS_FOLDER + "/test"
        self.create_folder(folder)
        folder_sw = folder + "/sw_" + str(number)
        self.create_folder(folder_sw)
        # Create the makefile
        makefile_sw = folder_sw + "/" + "Makefile"
        with open("template/sw/Makefile") as template:
            with open(makefile_sw, "w") as output:
                for line in template:
                    output.write(line.replace("<test_name>", test_name))
        # Create the .h file
        h_file = folder_sw + "/" + test_name + ".h"
        with open("template/sw/template.h") as template:
            with open(h_file, "w") as output:
                for line in template:
                    output.write(line.replace("<test_name>", test_name))
        # Create the .cpp file, filling in all generated code fragments.
        h_file = folder_sw + "/" + test_name + ".cpp"
        with open("template/sw/template.cpp") as template:
            with open(h_file, "w") as output:
                for line in template:
                    new_line = line.replace("<test_name>", test_name)
                    new_line = new_line.replace("<thread_declaration>", self._cpu_thread_generation(ex))
                    new_line = new_line.replace("<cpu_threads>", self._cpu_thread_control_generation(ex))
                    new_line = new_line.replace("<fpga_thread>", self._cpu_fpga_instruction(ex))
                    new_line = new_line.replace("<assert_test>", self._generate_assertions(ex))
                    output.write(new_line)
        return (folder_sw, test_name)

    def _generate_assertions(self, ex):
        """Build the C condition that detects the (disallowed) outcome of
        the execution: final memory values and all observed read values."""
        code = "\t\t\t\t\tif( (\n"
        read_list = []
        write_list = []
        for event in ex.labels:
            r_event = event
            # Writes not overwritten (no outgoing "co" edge) must be the
            # final value visible in the corresponding buffer.
            if ex.labels[r_event] in ["WrRsp", "CpuWrite"] and not ex.graph.get_outer_degree(r_event, "co"):
                code += " (" + str(VARIABLES[ex.location[r_event]]) + "_buf[i*ELEM_LINE]"
                if ex.labels[r_event] == "WrRsp":
                    r_event = ex.graph.get_first_node(r_event, "writepair")
                code += " == " + str(ex.write_value[r_event]) + ") && \n "
        for event in ex.labels:
            if ex.labels[event] in ["CpuRead", "RdRsp"]:
                # Pair each read with the write it reads-from ("rf").
                read_vertex = ex.graph.get_first_node(event, "rf")
                if ex.labels[event] == "CpuRead":
                    read_list.append("r" + str(ex.write_value[event]) + "[i]")
                else:
                    read_list.append("read_registers_buf[i*ELEM_LINE + " + str(ex.sequence[event]) + "]")
                if read_vertex:
                    if ex.labels[read_vertex] == "WrRsp":
                        read_vertex = ex.graph.get_first_node(read_vertex, "writepair")
                    write_list.append(str(ex.write_value[read_vertex]))
                else:
                    # No rf source: presumably the initial memory value is
                    # 42 -- TODO confirm against the template initialization.
                    write_list.append("42")
        for i in range(len(read_list) - 1):
            code += " (" + read_list[i] + " == " + write_list[i] + ") &&\n"
        code += " (" + read_list[-1] + " == " + write_list[-1] + ") \n"
        code += " ) \n ) { \n"
        code += "\t valid_test = 0;\n"
        code += "}\n"
        return code

    def _cpu_fpga_instruction(self, ex):
        """Emit the writeTestCSR() calls that encode the FPGA thread's
        events, two events per CSR across four CSRs (offsets 0..3)."""
        fpga_thread = None
        for event in ex.threads:
            if event in ex.fpga_threads:
                fpga_thread = ex.threads[event]
        fpga_thread_body = self._prepare_instruction_csr(ex, fpga_thread, 0)
        fpga_thread_body += self._prepare_instruction_csr(ex, fpga_thread, 1)
        fpga_thread_body += self._prepare_instruction_csr(ex, fpga_thread, 2)
        fpga_thread_body += self._prepare_instruction_csr(ex, fpga_thread, 3)
        return fpga_thread_body

    def _prepare_instruction_csr(self, ex, fpga_thread, offset):
        """Encode two consecutive FPGA events into one writeTestCSR() call
        (CSR number offset+11); each event is a packed 13-bit field group."""
        fpga_thread_body = "\t\twriteTestCSR(" + str(offset + 11) + ",\n"
        for i in range(0, 2):
            for event in ex.threads:
                if (ex.threads[event] == fpga_thread) and (ex.sequence[event] == i + 2*offset):
                    fpga_thread_body += "\t\t\t\t\t\t\t" + INSTRUCTION_ENCODING[ex.labels[event]].ljust(9)
                    fpga_thread_body += "<< " + str(i * 13).ljust(2) + "\t|" + "\t\t// Event " + str(
                        i + 2 * offset) + "\n"
                    # Narrowed from a bare `except:`: the lookups below are
                    # dict accesses, so a missing key raises KeyError.
                    try:
                        fpga_thread_body += "\t\t\t\t\t\t\t" + LOCATION_ENCODING[ex.location[event]].ljust(9)
                        fpga_thread_body += "<< " + str(i * 13 + 3).ljust(2) + "\t| \n"
                    except KeyError:
                        # Fences do not have a location
                        fpga_thread_body += "\t\t\t\t\t\t\t" + "0".ljust(9)
                        fpga_thread_body += "<< " + str(i * 13 + 3).ljust(2) + "\t| \n"
                    fpga_thread_body += "\t\t\t\t\t\t\t" + CHANNEL_ENCODING[ex.channel[event]].ljust(9)
                    fpga_thread_body += "<< " + str(i * 13 + 5).ljust(2) + "\t| \n"
                    fpga_thread_body += "\t\t\t\t\t\t\t" + str(ex.mdata[event]).ljust(9)
                    fpga_thread_body += "<< " + str(i * 13 + 7).ljust(2) + "\t|" + "\t\t// Mdata " + "\n"
                    try:
                        fpga_thread_body += "\t\t\t\t\t\t\t" + str(ex.write_value[event]).ljust(9)
                        fpga_thread_body += "<< " + str(i * 13 + 10).ljust(2) + "\t|" + "\t\t// Write value " + "\n"
                    except KeyError:
                        # Non-write events carry a zero write value.
                        fpga_thread_body += "\t\t\t\t\t\t\t" + "0".ljust(9)
                        fpga_thread_body += "<< " + str(i * 13 + 10).ljust(2) + "\t|" + "\t\t// Write value " + "\n"
        fpga_thread_body += "\t\t\t\t\t\t\t0);\n"
        return fpga_thread_body

    def _cpu_thread_control_generation(self, ex):
        """Emit the std::thread start and join statements for CPU threads."""
        thread_body = ""
        for thread in range(0, ex.get_number_threads):
            for event in ex.threads:
                if (ex.threads[event] == thread) and (event in ex.cpu_threads):
                    thread_body += "\tstd::thread t" + str(thread) + " (thread" + str(thread)
                    thread_body += ", x_buf, y_buf, z_buf, repeat" + ");\n"
                    break
        thread_body += "\n"
        for thread in range(0, ex.get_number_threads):
            for event in ex.threads:
                if (ex.threads[event] == thread) and (event in ex.cpu_threads):
                    thread_body += "\tt" + str(thread) + ".join();\n "
                    break
        return thread_body

    def _prepare_cpu_thread_declaration(self, ex, thread):
        """Emit the C++ function body for one CPU thread: its events in
        sequence order, repeated `repeat` times."""
        operations = ex.get_thread_operations(thread)
        thread_body = "void thread" + str(thread)
        thread_body += "(volatile uint64_t * x, volatile uint64_t * y, volatile uint64_t * z, uint64_t repeat) {\n"
        thread_body += "\tfor (int i=0; i < repeat; i++) {\n"
        for i in range(operations):
            for event in ex.threads:
                if (ex.threads[event] == thread) and (ex.sequence[event] == i):
                    if ex.labels[event] == "CpuWrite":
                        thread_body += "\t\t" + VARIABLES[ex.location[event]]
                        thread_body += "[i*ELEM_LINE] = " + str(ex.write_value[event]) + ";" + "\n"
                    if ex.labels[event] == "CpuRead":
                        thread_body += "\t\t" + "r" + str(ex.write_value[event]) + "[i]="
                        thread_body += VARIABLES[ex.location[event]] + "[i*ELEM_LINE];\n"
                    if ex.labels[event] == "CpuFence":
                        thread_body += "\t\t" + "std::atomic_thread_fence(std::memory_order_seq_cst);" + "\n"
        thread_body += "\t } \n } \n\n"
        return thread_body

    def _cpu_thread_generation(self, ex):
        """Emit one thread function declaration per CPU thread."""
        thread_declaration = ""
        for thread in range(0, ex.get_number_threads):
            for event in ex.threads:
                if (ex.threads[event] == thread) and (event in ex.cpu_threads):
                    thread_declaration += self._prepare_cpu_thread_declaration(ex, thread)
                    break
        return thread_declaration
class CbmcTests:
"""
Generates the H file and the CPP files for the CBMC test
"""
CBMC_CPU_THREADS = 2
CBMC_FPGA_THREADS = 1
CBMC_VARIABLES = 5
def __init__(self, executions):
self._executions = None
self.alloy_traces_h = None
self.alloy_traces_c = None
self._executions = executions
@staticmethod
def pretty_item(event, label, variable, write_value, mdata, channel):
code = ""
code += str(event).ljust(6)
code += str(label).ljust(10)
code += str(variable).ljust(6)
code += str(write_value).ljust(6)
code += str(mdata).ljust(6)
code += str(channel).ljust(6)
code += "\n"
return code
def pretty_print(self, execution):
code = "/*\n"
code += "Filename: " + execution.filename + "\n"
for thread in range(execution.get_number_threads):
code += "Thread " + str(thread) + "\n"
code += "Label".ljust(6) + "Event".ljust(10) + "Loc".ljust(6) + "Val".ljust(6) + "Mdata".ljust(
6) + "Channel".ljust(6)
code += "\n"
for operation in range(execution.get_thread_operations(thread)):
for event in execution.labels:
if execution.threads[event] == thread and execution.sequence[event] == operation:
if execution.labels[event] == "WrReq":
code += self.pretty_item(event[-1],
execution.labels[event],
VARIABLES[execution.location[event]],
execution.write_value[event],
execution.mdata[event],
CHANNEL_ENCODING[execution.channel[event]])
if execution.labels[event] == "WrRsp":
code += self.pretty_item(event[-1],
execution.labels[event],
VARIABLES[execution.location[event]],
"",
execution.mdata[event],
CHANNEL_ENCODING[execution.channel[event]])
if execution.labels[event] == "FnReq":
code += self.pretty_item(event[-1],
execution.labels[event],
"",
"",
execution.mdata[event],
CHANNEL_ENCODING[execution.channel[event]])
if execution.labels[event] == "FnRsp":
code += self.pretty_item(event[-1],
execution.labels[event],
"",
"",
execution.mdata[event],
CHANNEL_ENCODING[execution.channel[event]])
if execution.labels[event] == "FnReqAny":
code += self.pretty_item(event[-1],
"FnReq",
"",
"",
execution.mdata[event],
CHANNEL_ENCODING[execution.channel[event]])
if execution.labels[event] == "FnRspAny":
code += self.pretty_item(event[-1],
"FnRsp",
"",
"",
execution.mdata[event],
CHANNEL_ENCODING[execution.channel[event]])
if execution.labels[event] == "RdReq":
code += self.pretty_item(event[-1],
execution.labels[event],
VARIABLES[execution.location[event]],
"",
execution.mdata[event],
CHANNEL_ENCODING[execution.channel[event]])
if execution.labels[event] == "RdRsp":
code += self.pretty_item(event[-1],
execution.labels[event],
VARIABLES[execution.location[event]],
"",
execution.mdata[event],
CHANNEL_ENCODING[execution.channel[event]])
if execution.labels[event] == "CpuRead":
code += self.pretty_item(event[-1],
execution.labels[event],
VARIABLES[execution.location[event]],
"",
"",
"")
if execution.labels[event] == "CpuWrite":
code += self.pretty_item(event[-1],
execution.labels[event],
VARIABLES[execution.location[event]],
execution.write_value[event],
"",
"")
if execution.labels[event] == "CpuFence":
code += self.pretty_item(event[-1],
execution.labels[event],
"",
"",
"",
"")
code += "\n"
read_list = []
| |
# This file implements spiking neural networks as described
# in the work:
# <NAME>, Coarse scale representation of spiking neural networks:
# backpropagation through spikes and applications to neuromorphic hardware,
# International Conference on Neuromorphic Systems (ICONS), 2020
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
import math as m
from spikingnet import SpikingLayer, SpikingVextLayer, poisson_spikes
class SpikingShallowNetwork(nn.Module):
    """Single spiking layer driven by Poisson-encoded 28x28 input;
    returns the accumulated spike counts over Nsp time steps."""

    def __init__(self, Nin, Nout, Nsp, t1, beta=5, scale=1):
        super(SpikingShallowNetwork, self).__init__()
        self.Nsp = Nsp
        self.Nout = Nout
        self.Nin = Nin
        self.l1 = nn.Linear(self.Nin, self.Nout, bias=None)
        self.sl = SpikingLayer(t1, beta=beta)
        self.scale = scale

    def forward(self, x, device):
        x = x.view(-1, 28*28)
        batch = x.shape[0]
        spikes = torch.zeros(batch, self.Nout).to(device)
        voltage = torch.zeros(batch, self.Nout).to(device)
        counts = torch.zeros(batch, self.Nout).to(device)
        for _ in range(self.Nsp):
            # Fresh Poisson spike sample of the input at every step.
            sample = poisson_spikes(x, self.scale).to(device)
            spikes, voltage = self.sl(self.l1(sample), spikes, voltage)
            counts += spikes
        return counts
class SpikingHiddenNetwork(nn.Module):
    """Two spiking layers (hidden then output) on Poisson-encoded input;
    returns accumulated output spike counts over Nsp steps."""

    def __init__(self, Nin, Nhid, Nout, Nsp, t1, t2, beta=5, scale=1):
        super(SpikingHiddenNetwork, self).__init__()
        self.Nsp = Nsp
        self.Nhid = Nhid
        self.Nout = Nout
        self.l1 = nn.Linear(Nin, self.Nhid)
        self.l2 = nn.Linear(self.Nhid, self.Nout, bias=None)
        self.sl1 = SpikingLayer(t1, beta=beta)
        self.sl2 = SpikingLayer(t2, beta=beta)
        self.scale = scale

    def forward(self, x, device):
        x = x.view(-1, 28*28)
        batch = x.shape[0]

        def _state(n):
            # Zero-initialized (spike, membrane) state on the target device.
            return torch.zeros(batch, n).to(device)

        hid_s, hid_v = _state(self.Nhid), _state(self.Nhid)
        out_s, out_v = _state(self.Nout), _state(self.Nout)
        counts = _state(self.Nout)
        for _ in range(self.Nsp):
            sample = poisson_spikes(x, self.scale).to(device)
            hid_s, hid_v = self.sl1(self.l1(sample), hid_s, hid_v)
            out_s, out_v = self.sl2(self.l2(hid_s), out_s, out_v)
            counts += out_s
        return counts
class SpikingConvNetwork(nn.Module):
    """One spiking conv layer (4 channels, stride 2) flattened into a dense
    spiking output layer; input is Poisson-encoded each time step."""

    def __init__(self, Nin, Nout, Nsp, t1, t2, beta=5, scale=1):
        super(SpikingConvNetwork, self).__init__()
        self.Nsp = Nsp
        self.Nout = Nout
        self.Nin = Nin
        self.Nhid = 784
        self.conv1 = nn.Conv2d(1, 4, (5,5), stride=2, padding=2)
        self.l1 = nn.Linear(self.Nhid, self.Nout, bias=None)
        self.sl1 = SpikingLayer(t1, beta=beta)
        self.sl2 = SpikingLayer(t2, beta=beta)
        self.scale = scale

    def forward(self, x, device):
        batch = x.shape[0]
        hid_s = torch.zeros(batch, self.Nhid).to(device)
        hid_v = torch.zeros(batch, self.Nhid).to(device)
        out_s = torch.zeros(batch, self.Nout).to(device)
        out_v = torch.zeros(batch, self.Nout).to(device)
        counts = torch.zeros(batch, self.Nout).to(device)
        for _ in range(self.Nsp):
            sample = poisson_spikes(x, self.scale).to(device)
            conv_out = self.conv1(sample)
            flat = conv_out.view(conv_out.shape[0], -1)
            hid_s, hid_v = self.sl1(flat, hid_s, hid_v)
            out_s, out_v = self.sl2(self.l1(hid_s), out_s, out_v)
            counts += out_s
        return counts
class SpikingConvNetwork2(nn.Module):
    """Two spiking conv layers followed by a dense spiking readout;
    returns accumulated readout spike counts over Nsp steps."""

    def __init__(self, Nin, Nout, Nsp, t1, t2, beta=5, scale=1):
        super(SpikingConvNetwork2, self).__init__()
        self.Nsp = Nsp
        self.Nout = Nout
        self.Nin = Nin
        self.Nhid1 = Nin
        self.Nhid2 = 600
        self.scale = scale
        self.conv1 = nn.Conv2d(1, 4, (5,5), stride=2, padding=2)
        self.l1 = nn.Linear(self.Nhid2, self.Nout, bias=None)
        self.conv2 = nn.Conv2d(4, 6, (5,5), stride=1, padding=0)
        self.sl1 = SpikingLayer(t1, beta=beta)
        self.sl2 = SpikingLayer(t1, beta=beta)
        self.sl3 = SpikingLayer(t2, beta=beta)

    def forward(self, x, device):
        batch = x.shape[0]
        # Per-layer (spike, membrane) state, zero at the start of the run.
        c1_s = torch.zeros(batch, 4, 14, 14).to(device)
        c1_v = torch.zeros(batch, 4, 14, 14).to(device)
        c2_s = torch.zeros(batch, 6, 10, 10).to(device)
        c2_v = torch.zeros(batch, 6, 10, 10).to(device)
        out_s = torch.zeros(batch, self.Nout).to(device)
        out_v = torch.zeros(batch, self.Nout).to(device)
        counts = torch.zeros(batch, self.Nout).to(device)
        for _ in range(self.Nsp):
            sample = poisson_spikes(x, self.scale).to(device)
            c1_s, c1_v = self.sl1(self.conv1(sample), c1_s, c1_v)
            c2_s, c2_v = self.sl2(self.conv2(c1_s), c2_s, c2_v)
            flat = c2_s.view(c2_s.shape[0], -1)
            out_s, out_v = self.sl3(self.l1(flat), out_s, out_v)
            counts += out_s
        return counts
class SpikingLeNet5(nn.Module):
    """Spiking LeNet-5: two conv+maxpool stages and three dense layers, each
    followed by a spiking nonlinearity; the input image is re-sampled into a
    Bernoulli spike frame on every one of the Nsp simulated time steps."""

    def __init__(self, Nsp, t1, t2, beta=5, scale=1):
        super(SpikingLeNet5, self).__init__()
        # Set after super().__init__() so nn.Module is fully initialized
        # before any attribute assignment.
        self.Nsp = Nsp
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=6,
                               kernel_size=5, stride=1, padding=2, bias=True)
        self.max_pool_1 = nn.MaxPool2d(kernel_size=2)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=16,
                               kernel_size=5, stride=1, padding=0, bias=True)
        self.max_pool_2 = nn.MaxPool2d(kernel_size=2)
        self.fc1 = nn.Linear(16*5*5, 120, bias=False)
        self.fc2 = nn.Linear(120, 84, bias=False)
        self.fc3 = nn.Linear(84, 10, bias=False)
        self.sl1 = SpikingLayer(t1, beta=beta)
        self.sl2 = SpikingLayer(t1, beta=beta)
        self.sl3 = SpikingLayer(t2, beta=beta)
        self.sl4 = SpikingLayer(t2, beta=beta)
        self.sl5 = SpikingLayer(t2, beta=beta)
        self.scale = scale

    def build_x(self, x):
        """Draw one Bernoulli spike frame: a pixel fires (1.0) with
        probability proportional to scale * intensity, else stays 0.
        (An unused `torch.zeros_like(x)` temporary was removed.)"""
        xout = torch.rand_like(x)
        xout[xout > self.scale*x] = 0.0
        xout[xout > 0] = 1.0
        return xout

    def forward(self, x, device):
        # Per-layer (spike, membrane) state, zero at the start of the run.
        s1 = torch.zeros(x.shape[0], 6, 28, 28).to(device)
        v1 = torch.zeros(x.shape[0], 6, 28, 28).to(device)
        s2 = torch.zeros(x.shape[0], 16, 10, 10).to(device)
        v2 = torch.zeros(x.shape[0], 16, 10, 10).to(device)
        s3 = torch.zeros(x.shape[0], 120).to(device)
        v3 = torch.zeros(x.shape[0], 120).to(device)
        s4 = torch.zeros(x.shape[0], 84).to(device)
        v4 = torch.zeros(x.shape[0], 84).to(device)
        s5 = torch.zeros(x.shape[0], 10).to(device)
        v5 = torch.zeros(x.shape[0], 10).to(device)
        nsp = torch.zeros(x.shape[0], 10).to(device)
        for i in range(self.Nsp):
            xi = self.build_x(x).to(device)
            xi = self.conv1(xi)
            s1, v1 = self.sl1(xi, s1, v1)
            xi = self.max_pool_1(s1)
            xi = self.conv2(xi)
            s2, v2 = self.sl2(xi, s2, v2)
            xi = self.max_pool_2(s2)
            xi = xi.view(xi.shape[0], -1)
            xi = self.fc1(xi)
            s3, v3 = self.sl3(xi, s3, v3)
            xi = self.fc2(s3)
            s4, v4 = self.sl4(xi, s4, v4)
            xi = self.fc3(s4)
            s5, v5 = self.sl5(xi, s5, v5)
            nsp += s5
        return nsp
class SpikingLeNet5const(nn.Module):
    """Spiking LeNet-5 variant whose input layer is a SpikingVextLayer fed
    with the constant image intensities instead of Poisson/Bernoulli samples."""

    def __init__(self, Nsp, t0, t1, t2, beta=5, scale=1):
        super(SpikingLeNet5const, self).__init__()
        # Set after super().__init__() so nn.Module is fully initialized
        # before any attribute assignment.
        self.Nsp = Nsp
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=6,
                               kernel_size=5, stride=1, padding=2, bias=True)
        self.max_pool_1 = nn.MaxPool2d(kernel_size=2)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=16,
                               kernel_size=5, stride=1, padding=0, bias=True)
        self.max_pool_2 = nn.MaxPool2d(kernel_size=2)
        self.fc1 = nn.Linear(16*5*5, 120, bias=False)
        self.fc2 = nn.Linear(120, 84, bias=False)
        self.fc3 = nn.Linear(84, 10, bias=False)
        self.sl0 = SpikingVextLayer(t0, beta=beta)
        self.sl1 = SpikingLayer(t1, beta=beta)
        self.sl2 = SpikingLayer(t1, beta=beta)
        self.sl3 = SpikingLayer(t2, beta=beta)
        self.sl4 = SpikingLayer(t2, beta=beta)
        self.sl5 = SpikingLayer(t2, beta=beta)
        self.scale = scale

    def build_x(self, x):
        """Draw one Bernoulli spike frame from pixel intensities.
        (An unused `torch.zeros_like(x)` temporary was removed; note this
        helper is not called by forward(), which feeds x to sl0 directly.)"""
        xout = torch.rand_like(x)
        xout[xout > self.scale*x] = 0.0
        xout[xout > 0] = 1.0
        return xout

    def forward(self, x, device):
        # Per-layer (spike, membrane) state, zero at the start of the run.
        s0 = torch.zeros(x.shape[0], 1, 28, 28).to(device)
        v0 = torch.zeros(x.shape[0], 1, 28, 28).to(device)
        s1 = torch.zeros(x.shape[0], 6, 28, 28).to(device)
        v1 = torch.zeros(x.shape[0], 6, 28, 28).to(device)
        s2 = torch.zeros(x.shape[0], 16, 10, 10).to(device)
        v2 = torch.zeros(x.shape[0], 16, 10, 10).to(device)
        s3 = torch.zeros(x.shape[0], 120).to(device)
        v3 = torch.zeros(x.shape[0], 120).to(device)
        s4 = torch.zeros(x.shape[0], 84).to(device)
        v4 = torch.zeros(x.shape[0], 84).to(device)
        s5 = torch.zeros(x.shape[0], 10).to(device)
        v5 = torch.zeros(x.shape[0], 10).to(device)
        nsp = torch.zeros(x.shape[0], 10).to(device)
        for i in range(self.Nsp):
            # Constant external input drives the first spiking layer.
            s0, v0 = self.sl0(x, s0, v0)
            xi = self.conv1(s0)
            s1, v1 = self.sl1(xi, s1, v1)
            xi = self.max_pool_1(s1)
            xi = self.conv2(xi)
            s2, v2 = self.sl2(xi, s2, v2)
            xi = self.max_pool_2(s2)
            xi = xi.view(xi.shape[0], -1)
            xi = self.fc1(xi)
            s3, v3 = self.sl3(xi, s3, v3)
            xi = self.fc2(s3)
            s4, v4 = self.sl4(xi, s4, v4)
            xi = self.fc3(s4)
            s5, v5 = self.sl5(xi, s5, v5)
            nsp += s5
        return nsp
def train(args, model, device, train_loader, optimizer, epoch, scale=4):
    """Run one cross-entropy training epoch over `train_loader`.

    The spike counts are centered around zero (count - 0.5*Nsp) and scaled
    before the softmax loss. Progress is printed every 5 batches.
    (Unused locals `bsize`/`Nsp` and the mdata/mtarget aliases were removed.)
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        data, target = data.to(device), target.to(device)
        output = scale*(model(data, device)-0.5*model.Nsp)
        loss = F.cross_entropy(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 5 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
def test(args, model, device, test_loader, scale=4):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
mtarget = target
mdata = data
data, mtarget = mdata.to(device), mtarget.to(device)
output = scale*(model(data, device)-0.5*model.Nsp)
test_loss += F.cross_entropy(output, mtarget).item()
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred).to(device)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
return 100. * correct / len(test_loader.dataset)
def train_mse(args, model, device, train_loader, optimizer, epoch):
    """Run one MSE training epoch: the target is args.spikes at the true
    class index and 0 elsewhere (a scaled one-hot spike-count target).
    (The unused local `bsize` was removed.)
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        # Build the spike-count target for the 10 classes.
        mtarget = torch.zeros(target.shape[0], 10)
        for i in range(target.shape[0]):
            mtarget[i, target[i]] = args.spikes
        data, target = data.to(device), mtarget.to(device)
        output = model(data, device)
        loss = F.mse_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 5 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
def test_mse(args, model, device, test_loader):
model.eval()
Nst = model.Nsp
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
mtarget = torch.zeros(target.shape[0],10)
for i in range(target.shape[0]):
mtarget[i,target[i]]= args.spikes
data, mtarget = data.to(device), mtarget.to(device)
output = model(data, device)
test_loss += F.mse_loss(output, mtarget, reduction='sum').item() # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred).to(device)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
return 100. * correct / len(test_loader.dataset)
def main():
# Training settings
parser = argparse.ArgumentParser(description='SpikingNet example')
parser.add_argument('name', metavar='N', type=str, nargs=1,
help='filename')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.001)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--dataset', type=int, default=0, metavar='N',
help='dataset: mnist-0 fashionmnist-1')
| |
return ''
    def normalize_rev(self, rev):
        """Return the full hex commit id of `rev` as a unicode string."""
        return to_unicode(self._resolve_rev(rev).oid.hex)
    def short_rev(self, rev):
        """Return the shortest unambiguous prefix of `rev` (starting at
        shortrev_len characters), or the full rev if none is unambiguous."""
        rev = self.normalize_rev(rev)
        git_repos = self.git_repos
        for size in xrange(self.shortrev_len, 40):
            short_rev = rev[:size]
            try:
                git_object = git_repos[short_rev]
                # Prefix resolves to a single commit -- short enough.
                if git_object.type == GIT_OBJ_COMMIT:
                    return short_rev
            except (KeyError, ValueError):
                # Ambiguous or unknown prefix: try one character longer.
                pass
        return rev
    def display_rev(self, rev):
        """Human-readable form of `rev`: its shortened hex id."""
        return self.short_rev(rev)
    def get_node(self, path, rev=None):
        """Return the GitNode at `path` for `rev`.

        Raises NoSuchChangeset when a non-empty `rev` does not resolve.
        """
        rev = self._stringify_rev(rev)
        commit = self._resolve_rev(rev, raises=False)
        if commit is None and rev:
            raise NoSuchChangeset(rev)
        return GitNode(self, self.normalize_path(path), commit)
def get_quickjump_entries(self, rev):
git_repos = self.git_repos
refs = sorted(
(self._from_fspath(name), git_repos.lookup_reference(name))
for name in git_repos.listall_references()
if name.startswith('refs/heads/') or name.startswith('refs/tags/'))
for name, ref in refs:
if name.startswith('refs/heads/'):
commit = self._get_commit(ref.target)
if commit:
yield 'branches', name[11:], '/', commit.hex
for name, ref in refs:
if name.startswith('refs/tags/'):
commit = self._get_commit(ref.target)
yield 'tags', name[10:], '/', commit.hex
    def get_path_url(self, path, rev):
        # The configured repository URL applies uniformly to every path.
        return self.params.get('url')
    def get_changesets(self, start, stop):
        """Yield GitChangesets whose commit time lies in [start, stop],
        newest first, collected across all branch heads (deduplicated)."""
        seen_oids = set()
        def iter_commits():
            ts_start = to_timestamp(start)
            ts_stop = to_timestamp(stop)
            git_repos = self.git_repos
            for name in git_repos.listall_references():
                if not name.startswith('refs/heads/'):
                    continue
                ref = git_repos.lookup_reference(name)
                for commit in git_repos.walk(ref.target, _walk_flags):
                    ts = commit.commit_time
                    # Assumes _walk_flags yields commits newest-first, so
                    # anything older than the window ends this walk.
                    if ts < ts_start:
                        break
                    if ts_start <= ts <= ts_stop:
                        oid = commit.oid
                        if oid not in seen_oids:
                            seen_oids.add(oid)
                            yield ts, commit
        for ts, commit in sorted(iter_commits(), key=lambda v: v[0],
                                 reverse=True):
            yield GitChangeset(self, commit)
    def get_changeset(self, rev):
        """Return the GitChangeset for `rev` (raises if unresolvable)."""
        return GitChangeset(self, self._resolve_rev(rev))
    def get_changeset_uid(self, rev):
        # Git revs are globally unique hashes already.
        return rev
    def get_changes(self, old_path, old_rev, new_path, new_rev,
                    ignore_ancestry=0):
        """Yield (old_node, new_node, Node.FILE, action) tuples describing
        the file-level differences between two (path, rev) pairs."""
        # TODO: handle ignore_ancestry
        def iter_changes(old_commit, old_path, new_commit, new_path):
            old_tree = self._get_tree(old_commit.tree, old_path)
            old_rev = old_commit.hex
            new_tree = self._get_tree(new_commit.tree, new_path)
            new_rev = new_commit.hex
            for old_file, new_file, status in \
                    self._get_changes(old_tree, new_tree):
                action = _status_map.get(status)
                if not action:
                    continue
                old_node = new_node = None
                # Added files have no old node; deleted files no new node.
                if status != 'A':
                    old_node = self.get_node(
                        posixpath.join(old_path, old_file), old_rev)
                if status != 'D':
                    new_node = self.get_node(
                        posixpath.join(new_path, new_file), new_rev)
                yield old_node, new_node, Node.FILE, action
        old_commit = self._resolve_rev(old_rev)
        new_commit = self._resolve_rev(new_rev)
        return iter_changes(old_commit, self.normalize_path(old_path),
                            new_commit, self.normalize_path(new_path))
    def previous_rev(self, rev, path=''):
        """Return the first-parent rev preceding `rev` (restricted to
        commits touching `path` when given); None for a root commit."""
        commit = self._resolve_rev(rev)
        if not path or path == '/':
            # Whole repository: simply the first parent, if any.
            for parent in commit.parents:
                return parent.hex
        else:
            node = GitNode(self, self.normalize_path(path), commit)
            # First commit from the walk is the latest one touching `path`.
            for commit, action in node._walk_commits():
                for parent in commit.parents:
                    return parent.hex
    def next_rev(self, rev, path=''):
        """Return the next younger rev (a direct child of `rev`) that
        modified `path`; implicitly None when `rev` is a tip."""
        rev = self.normalize_rev(rev)
        path = self._to_fspath(self.normalize_path(path))
        for name, ref, walker in self._iter_ref_walkers(rev):
            if rev not in walker:
                continue
            # walker.reverse(rev) presumably iterates toward the ref tip --
            # confirm against _iter_ref_walkers.
            for commit in walker.reverse(rev):
                # Only direct children of the current rev are of interest.
                if not any(p.hex == rev for p in commit.parents):
                    continue
                tree = commit.tree
                entry = self._get_tree(tree, path)
                if entry is None:
                    # Path no longer exists past this point.
                    return None
                for parent in commit.parents:
                    parent_tree = parent.tree
                    if tree.oid == parent_tree.oid:
                        continue
                    parent_entry = self._get_tree(parent_tree, path)
                    if entry is None or parent_entry is None or \
                            entry.oid != parent_entry.oid:
                        return commit.hex
                # Child did not change `path`: continue from the child.
                rev = commit.hex
    def parent_revs(self, rev):
        """Return the hex ids of all parents of `rev`."""
        commit = self._resolve_rev(rev)
        return [c.hex for c in commit.parents]
    def child_revs(self, rev):
        """Return the hex ids of all commits that have `rev` as a parent."""
        def iter_children(rev):
            seen = set()
            for name, ref, walker in self._iter_ref_walkers(rev):
                if rev not in walker:
                    continue
                for commit in walker.reverse(rev):
                    if commit.oid in seen:
                        # Already visited via another ref; stop this walk.
                        break
                    seen.add(commit.oid)
                    if any(p.hex == rev for p in commit.parents):
                        yield commit
        return [c.hex for c in iter_children(self.normalize_rev(rev))]
    def rev_older_than(self, rev1, rev2):
        """True when rev1 is a strict ancestor of rev2 (reachable from it)."""
        oid1 = self._resolve_rev(rev1).oid
        oid2 = self._resolve_rev(rev2).oid
        if oid1 == oid2:
            return False
        return any(oid1 == commit.oid
                   for commit in self.git_repos.walk(oid2, _walk_flags))
    def get_path_history(self, path, rev=None, limit=None):
        """Not supported by this backend."""
        raise TracError(_("GitRepository does not support path_history"))
class GitNode(Node):
def __init__(self, repos, path, rev, created_commit=None):
self.log = repos.log
if type(rev) is pygit2.Commit:
commit = rev
rev = commit.hex
else:
if rev is not None and not isinstance(rev, unicode):
rev = to_unicode(rev)
commit = repos._resolve_rev(rev, raises=False)
if commit is None and rev:
raise NoSuchChangeset(rev)
tree_entry = None
filemode = None
tree = None
blob = None
if commit:
normrev = commit.hex
git_object = commit.tree
if path:
tree_entry = repos._get_tree_entry(git_object, path)
if tree_entry is None:
raise NoSuchNode(path, rev)
filemode = _get_filemode(tree_entry)
if filemode == _filemode_submodule:
git_object = None
else:
git_object = repos.git_repos.get(tree_entry.oid)
if git_object is None:
if filemode == _filemode_submodule:
kind = Node.DIRECTORY
else:
kind = None
elif git_object.type == GIT_OBJ_TREE:
kind = Node.DIRECTORY
tree = git_object
elif git_object.type == GIT_OBJ_BLOB:
kind = Node.FILE
blob = git_object
if kind is None:
raise NoSuchNode(path, rev)
else:
if path:
raise NoSuchNode(path, rev)
normrev = None
kind = Node.DIRECTORY
self.commit = commit
self.tree_entry = tree_entry
self.tree = tree
self.blob = blob
self.filemode = filemode
self.created_path = path # XXX how to use?
self._created_commit = created_commit
Node.__init__(self, repos, path, normrev, kind)
def _get_created_commit(self):
commit = self._created_commit
if commit is None and self.commit and self.rev:
_get_tree_entry = self.repos._get_tree_entry
path = self.repos._to_fspath(self.path)
commit = self.commit
entry = _get_tree_entry(commit.tree, path)
parents = commit.parents
if parents:
parent_entry = _get_tree_entry(parents[0].tree, path)
if parent_entry is not None and parent_entry.oid == entry.oid:
commit = None
for commit, action in self._walk_commits():
break
self._created_commit = commit or self.commit
return commit
@property
def created_rev(self):
commit = self._get_created_commit()
if commit is None:
return None
return commit.hex
def _walk_commits(self):
skip_merges = self.isfile
_get_tree = self.repos._get_tree
path = self.repos._to_fspath(self.path)
parent = parent_tree = None
for commit in self.repos.git_repos.walk(self.rev, _walk_flags):
if parent is not None and parent.oid == commit.oid:
tree = parent_tree
else:
tree = _get_tree(commit.tree, path)
parents = commit.parents
n_parents = len(parents)
if skip_merges and n_parents > 1:
continue
if n_parents == 0:
if tree is not None:
yield commit, Changeset.ADD
return
parent = parents[0]
parent_tree = _get_tree(parent.tree, path)
if tree is None:
if parent_tree is None:
continue
action = Changeset.DELETE
elif parent_tree is None:
action = Changeset.ADD
elif parent_tree.oid != tree.oid:
action = Changeset.EDIT
else:
continue
yield commit, action
def get_content(self):
if not self.isfile:
return None
return StringIO(self.blob.data)
def get_properties(self):
props = {}
if self.filemode is not None:
props['mode'] = '%06o' % self.filemode
return props
def get_annotations(self):
if not self.isfile:
return
annotations = []
for hunk in self.repos.git_repos.blame(
self.repos._to_fspath(self.path),
newest_commit=self.commit.oid):
commit_id = str(hunk.final_commit_id)
annotations.extend([commit_id] * hunk.lines_in_hunk)
return annotations
def get_entries(self):
if self.commit is None or self.tree is None or not self.isdir:
return
repos = self.repos
git_repos = repos.git_repos
_get_tree = repos._get_tree
_from_fspath = repos._from_fspath
path = repos._to_fspath(self.path)
names = sorted(entry.name for entry in self.tree)
def get_entries(commit):
tree = _get_tree(commit.tree, path)
if tree is None:
tree = ()
return dict((entry.name, entry) for entry in tree)
def is_blob(entry):
if entry:
return git_repos[entry.oid].type == GIT_OBJ_BLOB
else:
return True
def get_commits():
commits = {}
parent = parent_entries = None
for commit in git_repos.walk(self.rev, _walk_flags):
parents = commit.parents
n_parents = len(parents)
if n_parents == 0:
break
parent = parents[0]
if not parent and parent.oid == commit.oid:
curr_entries = parent_entries
else:
curr_entries = get_entries(commit)
parent_entries = get_entries(parent)
for name in names:
if name in commits:
continue
curr_entry = curr_entries.get(name)
parent_entry = parent_entries.get(name)
if not curr_entry and not parent_entry:
continue
object_changed = not curr_entry or not parent_entry or \
curr_entry.oid != parent_entry.oid
if n_parents > 1 and object_changed and \
is_blob(curr_entry) and is_blob(parent_entry):
continue # skip merge-commit if blob
if object_changed:
commits[name] = commit
if len(commits) == len(names):
break
return commits
commits = get_commits()
for name in names:
yield GitNode(repos, posixpath.join(self.path, _from_fspath(name)),
self.commit, created_commit=commits.get(name))
def get_content_type(self):
    """Return the content type: None for directories, '' (unknown) for files."""
    return None if self.isdir else ''
def get_content_length(self):
    """Return the blob size in bytes for file nodes, otherwise None."""
    return self.blob.size if self.isfile else None
def get_history(self, limit=None):
    """Yield (path, revision, action) tuples from this node's commit walk.

    Stops after ``limit`` entries when ``limit`` is not None.
    """
    node_path = self.path
    for count, (commit, action) in enumerate(self._walk_commits(), start=1):
        yield node_path, commit.hex, action
        if count == limit:
            return
def get_last_modified(self):
    """Return the commit time of this file's creating commit, or None.

    None is returned for non-file nodes and when no creating commit
    could be determined.
    """
    if not self.isfile:
        return None
    commit = self._get_created_commit()
    return None if commit is None else self.repos._get_commit_time(commit)
class GitChangeset(Changeset):
"""A Git changeset in the Git repository.
Corresponds to a Git commit blob.
"""
def __init__(self, repos, rev):
    """Build a changeset from either a pygit2.Commit or a revision spec."""
    self.log = repos.log
    # Accept a ready-made commit object to skip the redundant lookup.
    if type(rev) is pygit2.Commit:
        commit = rev
    else:
        commit = repos._resolve_rev(rev)
    rev = commit.hex
    author = repos._get_commit_username(commit)
    date = repos._get_commit_time(commit)
    self.commit = commit
    Changeset.__init__(self, repos, rev, commit.message, author, date)
def get_branches(self):
    """Return the branches containing this changeset."""
    repos = self.repos
    return repos._get_branches_cset(self.rev)
def get_tags(self):
    """Return the tags pointing at this changeset."""
    repos = self.repos
    return repos._get_tags_cset(self.rev)
def get_properties(self):
    """Return Git-specific changeset properties.

    Keys (each present only when non-empty): 'git-Parents',
    'git-committer'/'git-author' (only when they differ),
    'git-Branches', 'git-Tags', 'git-Children'.
    """
    commit = self.commit
    properties = {}
    if commit.parents:
        properties['git-Parents'] = [c.hex for c in commit.parents]
    # Expose author/committer separately only when they actually differ.
    author, committer = commit.author, commit.committer
    if (author.name, author.email) != (committer.name, committer.email):
        properties['git-committer'] = committer
        properties['git-author'] = author
    branches = self.repos._get_branches(self.rev)
    if branches:
        properties['git-Branches'] = branches
    tags = self.get_tags()
    if tags:
        properties['git-Tags'] = tags
    children = self.repos.child_revs(self.rev)
    if children:
        properties['git-Children'] = children
    return properties
def | |
# Source repository: Skepay/Resume-Projects
# Contains all npcs and puzzles for the game.
#TODO: Convert to all object-oriented.
from src import *
#~/ SHOP \~#
class WanderingTraveler:
    """Shop NPC: greets the player, sells items in a loop, then relocates.

    NOTE(review): ``Greeting`` and ``Leave`` use the module-level globals
    ``player`` and ``rooms`` rather than the constructor argument --
    confirm those globals are set before this class is instantiated.
    """
    def __init__(self, player):
        # The whole interaction runs inside the constructor: greet/shop in a
        # loop until the player declines, then move the NPC elsewhere.
        ClearConsole()
        TypeOut("Wandering Traveler: Welcome to my shop!")
        time.sleep(1.25)
        self.name = "<NAME>"
        # Price (in coins) -> item name; the keys double as the cost charged.
        self.shopItems = {
            2 : "Water",
            5 : "Very yummy Hot Dog",
            10 : "Dope Jacket",
            15 : "Small Health Potion",
            30 : "Large Health Potion",
            50 : "Sword"
        }
        while 1:
            sg = self.Greeting()
            # NOTE(review): when Greeting() returns truthy it is called a
            # second time here, but that inner result is overwritten at the
            # top of the next loop iteration -- a "n" answer to the inner
            # call is silently ignored. Confirm whether this is intended.
            if sg: sg = self.Greeting()
            else: break
        self.Leave()
        return
    def Greeting(self):
        # Ask whether the player wants to shop; return 1 to continue, 0 to stop.
        ClearConsole()
        TypeOut("Wandering Traveler: Would you like to purchase something?\n(y/n)")
        flush_input()
        shop = ValidInput("->", ["y","n"])
        if shop == "y":
            # NOTE(review): `player` here is the module-level global, not
            # the constructor parameter -- verify it exists at call time.
            self.Shop(player)
            return 1
        else:
            TypeOut("Wandering Traveler: Ok.. farewell thy lost player!")
            time.sleep(1)
            return 0
    def Shop(self,player):
        # Display the shop table, read a selection, charge and deliver the item.
        ClearConsole()
        validItems = []
        # Print shop
        print("\tSHOP ITEMS")
        for index, key in enumerate(self.shopItems):
            # Pad item names so the price column lines up (30-char field).
            lenItem = 30-len(self.shopItems[key])
            print("%g. %s%s$%g coins"%(index+1, self.shopItems[key], ' '*lenItem, key))
            validItems.append(str(index+1))
        print("Balance: %g"%player.coins)
        print("\n")
        playsound(boopSound, block=False)
        flush_input()
        purchase = ValidInput("What would you like to buy?\n->", validItems)
        # Translate the 1-based menu number back into the price key.
        purchase = list(self.shopItems)[int(purchase)-1]
        if purchase > player.coins:
            ClearConsole()
            playsound(boopSound, block=False)
            TypeOut("Wandering Traveler: It doesn't seem like you can afford that.")
            time.sleep(1)
            return
        player.coins -= purchase
        # The sword is also usable in battle, so it goes in attackItems too.
        if self.shopItems[purchase] == "Sword":
            player.attackItems.append(self.shopItems[purchase])
        player.inventory.append(self.shopItems[purchase])
        ClearConsole()
        playsound(newItem,block=False)
        TypeOut("%s has been added to your inventory.."%self.shopItems[purchase])
        input("\n\nPress any key to continue...")
        return
    def Leave(self):
        # Announce departure and move this NPC to a random vacant room
        # outside the player's immediate neighborhood.
        ClearConsole()
        playsound(boopSound, block=False)
        TypeOut("Wandering Traveler: I'm off to explore some more, maybe I'll see you again!")
        while 1:
            newNpcRoom = random.choice(rooms)
            # NOTE(review): assumes room.name is an integer index so that a
            # range() membership test makes sense -- confirm.
            if newNpcRoom.name not in range(player.room.name, player.room.name+5) and not newNpcRoom.npc:
                newNpcRoom.npc = "Wandering Traveler"
                break
        player.room.npc = None
        time.sleep(1.5)
        return
#~/ NPCS \~#
# COLIN'S BIGDIKMAN
class BigDikman:
    """Hostile NPC: taunts the player and forces a turn-based battle.

    NOTE(review): ``Damage`` and ``Die`` use the module-level global
    ``player`` while ``Battle`` receives it as a parameter -- confirm the
    global exists when those methods run.
    """
    def __init__(self, player):
        self.hp = random.choice([15,20,25])
        self.name = self.GetDikmanName()
        self.gameOver = False
        self.Greeting()
        # BUGFIX: the original branched on `"Sword" in player.inventory`
        # but both branches called Battle(player) identically -- the
        # conditional was dead code and has been removed.
        self.Battle(player)
    def GetDikmanName(self):
        """Return a randomly assembled first+last name for this NPC."""
        firstName = ["Alfred", "Charlie", "Betty", "Billy", "Hughbert", "Home", "Homie", "Cox", "Guy", "Frackles", "Adolf"]
        lastName = ["CheezyDix", "Salsadeek", "Python", "Veenis", "Weiner", "Butch", "Longdong", "Girthman", "Snakerotch", "Moobs"]
        return random.choice(firstName) + " " + random.choice(lastName)
    def Greeting(self):
        """Print the intro taunt lines."""
        ClearConsole()
        TypeOut("???: YOU'RE GONNA HAVE TO FIGHT ME IF YOU WANNA GET PAST MY ELASTIC DIK.\n", newline=False)
        time.sleep(.5)
        # BUGFIX: the original passed a tuple to TypeOut(); every other
        # call site passes a single string, so concatenate instead.
        TypeOut(self.name + ": " + self.GetRandomLine())
        time.sleep(1.5)
        return
    def GetRandomLine(self):
        """Return one random taunt line."""
        lines = [f"Whats up baby cakes? Its {self.name} time!", f"{self.name} is here to give you children! Through your nose!", "Time to choke!", "Smell the cheese?", "Do you like yogurt?", "It's long dong time!", "Yeah, I have a super power. A really long [REDACTED]", "Mine is longer than yours."]
        return random.choice(lines)
    def Battle(self, player):
        """Run the turn-based fight loop until one side's HP reaches zero."""
        ct = 0
        # Menu number -> player attack callable; each takes this NPC as target.
        # NOTE(review): extra moves are numbered from 4 in attackItems order,
        # but this dict hard-codes 4=UseSword / 5=UseDikWhip -- the labels can
        # mismatch if attackItems holds only the whip. Confirm ordering.
        attackDict = {"1" : player.Punch, "2" : player.Kick, "3" : player.GoForDik, "4" : player.UseSword, "5" : player.UseDikWhip}
        while(self.hp > 0 and player.hp > 0):
            if not self.gameOver:
                ClearConsole()
                ct+=1
                ColorPrint("ROUND %g"%ct, inputColor=TextColor.red)
                ColorPrint("%s HP: "%self.name, newLine=False); ColorPrint("%g"%self.hp, inputColor=TextColor.red)
                ColorPrint("%s HP: "%player.name, newLine=False); ColorPrint("%g\n"%player.hp, inputColor=TextColor.red)
                print('\n')
                if ct == 1:
                    print("You attack first, what move would you like to use?")
                else:
                    print("Your turn to attack, what move would you like to use?")
                ColorPrint("1.",TextColor.red, newLine=False); ColorPrint(" Punch")
                ColorPrint("2.", TextColor.red, newLine=False); ColorPrint(" Kick")
                ColorPrint("3.", TextColor.red, newLine=False); ColorPrint(" Go for dik")
                if(not (player.HasSpecialAttackItems())):
                    flush_input()
                    move = ValidInput("\n-> ", ["1","2","3"])
                else:
                    # Offer each special attack item as an extra numbered move.
                    moveIndex = 4
                    validMoves = ["1","2","3"]
                    for i in player.attackItems:
                        ColorPrint("%g."%moveIndex, TextColor.red,newLine=False); ColorPrint(" Use %s"%i)
                        validMoves.append(str(moveIndex))
                        moveIndex += 1
                    flush_input()
                    move = ValidInput("\n-> ", validMoves)
                attackDict[move](self)
                print("Press any key to continue...")
                getch()
            else: return
    def Damage(self, dmg):
        """Apply `dmg` to this NPC; counter-attack if it survives, die otherwise.

        Returns 0 when the counter-attack kills the player, the result of
        Die() when this NPC dies, and None otherwise.
        """
        ClearConsole()
        ColorPrint("YOU DAMAGED %s FOR %s."%(self.name, dmg), TextColor.red)
        self.hp -= dmg
        time.sleep(1.5)
        ClearConsole()
        if(self.hp > 0):
            ClearConsole()
            ColorPrint("%s: Hmmm. That didn't hurt, retard.\n"%self.name)
            time.sleep(.5)
            ColorPrint(self.GetRandomLine())
            attackDmg = random.randint(1,3)
            time.sleep(.5)
            ColorPrint("\nYOU TOOK %g DAMAGE.\n"%attackDmg, TextColor.red)
            player.hp -= attackDmg
            time.sleep(1)
            if player.hp <= 0:
                ClearConsole()
                diedFont = Figlet(font="slant")
                time.sleep(1)
                ColorPrint(str(diedFont.renderText("YOU DIED")), inputColor=TextColor.red)
                time.sleep(2.5)
                player.PlayerDie()
                self.gameOver = True
                return 0
        else:
            ClearConsole()
            return self.Die()
    def Die(self):
        """Play the death sequence, drop a random reward and clear the room."""
        playsound(boopSound,block=False)
        ColorPrint("%s: Ouch. That one hurt my dik."%self.name)
        time.sleep(.5)
        ColorPrint("%s HAS DIED."%self.name, TextColor.red)
        time.sleep(2)
        playsound(newItem,block=False)
        bdItem = random.choice(['Large Health Potion', 'Dik Whip'])
        ColorPrint("You have recieved a %s!\n\n"%bdItem, inputColor=TextColor.yellow)
        player.inventory.append(bdItem)
        player.room.npc = None
# PEWDIEPIE
class PewDiePie:
    """Trivia NPC: asks all questions once, then rewards a score >= 5."""
    def __init__(self, player):
        self.name = "PewDiePie"
        # Running score: +1 per correct answer, -1 per wrong answer.
        self.pdpScore = 0
        # One-off praise lines; each is removed once used (see RandomResponse).
        self.pewdsResponses = ['Wonderful job!', '20% Muscle Increase', 'Big PP', '*meme review*', 'HahHAhah HOWS IT GOIN BROES.. MY NAME IS PEWWWWWWWWWWWWWWDIEPIEEHHHHHHHHHHHH', 'Code PEWDIEPIE gets you 30% off Gfuel.com', 'Is for me?', 'Become a member today and get access to special videos!', 'www.youtube.com/pewdiepie', 'Gemhunter 2 is my favorite game!', 'Glug glug glug', 'Are ya winning, son?']
        # Question -> [correct answer, wrong, wrong]; the first element is
        # the correct one (Questions() copies it out before shuffling).
        self.pdpTrivia = {
            "What is the great price of my chair?" : ["399.99", "420.69", "399.90"],
            "What is the name of my signature GFUEL flavor?" : ["Lingonberry", "Swedish Meatballs", "Pewds"],
            "What year was it when I reached 50,000,000 subs?" : ["2016", "2015", "2013"],
            "What is my Wife's name?" : ["<NAME>", "<NAME>", "<NAME>"],
            "Which games does the infamous word \"BARRELS\" come from?" : ["Amnesia", "Resident Evil", "Paranormal Activity"],
            "Before the \"9 Year olds,\" what was my fanbase called?" : ["Bro Army", "Floor Gang", "Gamers"],
            "Where am I from?" : ["Sweden", "America", "UK"],
            "What was he to me?" : ["A father", "A boyfriend", "A homie"],
            "How old was I in 2014?" : ["25", "21", "23"],
            "What is my favorite food?" : ["Pie", "Pizza", "Hotdogs"]
        }
        play = self.Greeting()
        if play:
            ClearConsole()
            print("Please type your answers out.")
            time.sleep(1)
            ClearConsole()
            # Questions() deletes each asked question, so this loop ends
            # once every question has been asked exactly once.
            while len(self.pdpTrivia) > 0:
                self.Questions()
            self.GameOver(player)
        else:
            ClearConsole()
            playsound(spitSound, block=False)
            TypeOut('PEWDIEPIE: Get out of my sight. atoo I spit on you')
            time.sleep(1.5)
    def Greeting(self):
        # Offer the trivia game; return True when the player accepts.
        ClearConsole()
        TypeOut("???: HEY BRO ITS ME, PEWDIEPIEEEEEE.\n",newline=False)
        TypeOut("PEWDIEPIE: Wanna play a little trivia? If you win I'll give you my chair.\n",newline=False)
        flush_input()
        playTrivia = ValidInput("(y/n)\n\n-> ",["y","n"])
        if playTrivia == "y":
            return True # play
        return False # dont play
    def RandomResponse(self):
        # Print a praise line and retire it so it is never repeated.
        line = random.choice(self.pewdsResponses)
        TypeOut("PEWDIEPIE: %s"%line)
        self.pewdsResponses.remove(line)
        return
    def Questions(self):
        # Ask one randomly chosen remaining question, score it, then remove it.
        ClearConsole()
        triviaQuestion = random.randint(0,len(self.pdpTrivia)-1) # gets a random question from the dictionary
        answerList = list(self.pdpTrivia.items())[triviaQuestion][1].copy() # gets the answers for that questions and copies it (first item is correct answer)
        # NOTE: this shuffles the list object stored in the dict in place;
        # answerList (copied above) still has the correct answer first.
        random.shuffle(list(self.pdpTrivia.items())[triviaQuestion][1]) # shuffles the original answers
        ColorPrint(list(self.pdpTrivia.items())[triviaQuestion][0]) # prints the question
        # print answers
        for index, answer in enumerate(list(self.pdpTrivia.items())[triviaQuestion][1]):
            print("%s. %s\n"%(index+1, answer), end = '')
        flush_input()
        # The player must type the answer text itself, not the menu number.
        pdpTriviaGuess = ValidInput("\n-> ",[answerList[0],answerList[1],answerList[2]])
        if pdpTriviaGuess == answerList[0]: # if their guess was right
            self.pdpScore += 1
            self.RandomResponse()
            time.sleep(1)
        else: # if their guess was wrong
            self.pdpScore -= 1
            ColorPrint("INCORRECT!", TextColor.red)
            TypeOut("PEWDIEPIE: The correct answer was %s."%answerList[0])
            #self.RandomResponse()
            time.sleep(1.5)
        del self.pdpTrivia[list(self.pdpTrivia.items())[triviaQuestion][0]]
    def GameOver(self, player):
        # Announce the final score; a score of 5 or more wins the chair.
        ClearConsole()
        TypeOut("PEWDIEPIE: Hm.. you finished with a score of ", 0.06, newline=False); ColorPrint(str(self.pdpScore), TextColor.yellow)
        time.sleep(1.5)
        if self.pdpScore >= 5: # player win
            TypeOut("PEWDIEPIE: Good enough for me!\nPEWDIEPIE: Take my chair, you were like a father to me afterall..\nPEWDIEPIE: Be seeing you, gamer.")
            playsound(newItem,block=False)
            ColorPrint("You recieved a PewDiePie 100M Edition Clutch Chair!", TextColor.red)
            time.sleep(1)
            TypeOut("To use the chair, interact with it in your inventory from the menu.")
            player.inventory.append("PewDiePie 100M Edition Clutch Chair")
            time.sleep(2)
        else: # player loss
            TypeOut("PEWDIEPIE: No, you know what.. you're a sucky gamer, you can't have my chair.")
            playsound(spitSound, block=False)
            TypeOut('PEWDIEPIE: Get out of my sight. atoo I spit on you')
            time.sleep(2)
        # Removes npc from room.
        player.room.npc = None
        input("Press any key to continue...")
# ELON MUSK
class Elon:
def __init__(self, player):
    # Runs the whole encounter: offer the maze challenge, then play or refuse.
    self.name = "<NAME>"
    # Remember this NPC's room as an index into the global `rooms` list.
    self.room = rooms.index(player.room)
    play = self.Greeting()
    if play:
        self.driveRoadster(player)
        self.GameOver()
    else:
        TypeOut("ELON MUSK: ...")
def Greeting(self):
    """Offer the roadster maze challenge; return True if the player accepts."""
    ClearConsole()
    TypeOut("???: H-h-h hey. I'm Elon.\n",newline=False)
    TypeOut("ELON MUSK: Can you drive my roadster through these martian mountains? I'll let you build and use the next Boring Company tunnel if you do.\n",newline=False)
    flush_input()
    answer = ValidInput("(y/n)\n\n-> ", ["y", "n"])
    return answer == "y"
def driveRoadster(self, player):
ClearConsole()
maze = ['########## #########', '# # # # #', '#### ### #### # ## #', '# # # # #', '## ### # ##### #', '# # ##### ###', '#### # # ### #', '# ### ## ## # #', '# # # # # ### #', '###### ## # # # #', '# # # # ##### #', '# # ##### ## # # #', '# ## # # ##', '# ############# ##', '# # # # ######', '## ### # | |
import datetime
from dateutil.relativedelta import relativedelta
from rest_framework import status, viewsets
from rest_framework.decorators import link, list_route
from rest_framework.exceptions import ParseError
from rest_framework.fields import ValidationError
from rest_framework.response import Response
from dbservice.apps.homes.models import Measurement
from dbservice.apps.users.models import User
from dbservice.apps.utils.viewsets import (BulkCreateModelMixin,
JSONSchemaViewSet)
from . import filters, models, serializers
from .aggregated import (aggregated, get_temperature_home,
response_virtual_energy_port_measurements)
from .condensed import condensed
from .status import get_status
from .utils import response_fixed_value_measurements, response_measurements
class ApplianceViewSet(viewsets.ModelViewSet):
    """
    `/schemas/homes/appliances/list/`
    `/schemas/homes/appliances/detail/` and
    `/schemas/homes/appliances/update/`
    Appliance may be filtered on `residential_home`.
    """
    model = models.Appliance
    serializer_class = serializers.ApplianceSerializer
    filter_class = filters.ApplianceFilter
    # BUGFIX: ('residential_home') without a trailing comma is a plain
    # string, not a tuple -- DRF iterates filter_fields, so it saw the
    # individual characters instead of the field name.
    filter_fields = ('residential_home',)

    def list(self, request, *args, **kwargs):
        """List appliances; surface filter validation errors as HTTP 400."""
        try:
            return super().list(request, *args, **kwargs)
        except ValidationError as e:
            raise ParseError(e)

    def get_queryset(self):
        """Superusers see everything; other users only their own homes."""
        qs = super().get_queryset()
        user = self.request.user
        if user.is_superuser:
            return qs
        return qs.filter(residential_home__dno_customer_id=user)
class ApplianceSchema(JSONSchemaViewSet):
    # Serves the JSON schema derived from ApplianceSerializer (homes-v1 app).
    schema_for = serializers.ApplianceSerializer
    app_name = 'homes-v1'
class EnergyConsumptionPeriodViewSet(viewsets.ModelViewSet):
    """
    Data format described in
    `/schemas/homes/energy_consumption_period/list/`
    `/schemas/homes/energy_consumption_period/detail/` and
    `/schemas/homes/energy_consumption_period/update/`
    Energy consumption period may be filtered on `appliance`.
    """
    model = models.EnergyConsumptionPeriod
    serializer_class = serializers.EnergyConsumptionPeriodSerializer
    filter_class = filters.EnergyConsumptionPeriodFilter
    # BUGFIX: ('appliance') is a string, not a tuple -- the trailing comma
    # is required so DRF sees one field name, not a character sequence.
    filter_fields = ('appliance',)

    def list(self, request, *args, **kwargs):
        """List periods; surface filter validation errors as HTTP 400."""
        try:
            return super().list(request, *args, **kwargs)
        except ValidationError as e:
            raise ParseError(e)

    def get_queryset(self):
        """Superusers see everything; other users only their own appliances."""
        qs = super().get_queryset()
        user = self.request.user
        if user.is_superuser:
            return qs
        return qs.filter(appliance__residential_home__dno_customer_id=user)
class EnergyConsumptionPeriodSchema(JSONSchemaViewSet):
    # Serves the JSON schema for energy consumption periods (homes-v1 app).
    schema_for = serializers.EnergyConsumptionPeriodSerializer
    app_name = 'homes-v1'
class EnergyProductionPeriodViewSet(viewsets.ModelViewSet):
    """
    Data format described in
    `/schemas/homes/energy_production_period/list/`
    `/schemas/homes/energy_production_period/detail/` and
    `/schemas/homes/energy_production_period/update/`
    Energy production period may be filtered on `appliance`.
    """
    model = models.EnergyProductionPeriod
    serializer_class = serializers.EnergyProductionPeriodSerializer
    filter_class = filters.EnergyProductionPeriodFilter
    # BUGFIX: ('appliance') is a string, not a tuple -- the trailing comma
    # is required so DRF sees one field name, not a character sequence.
    filter_fields = ('appliance',)

    def list(self, request, *args, **kwargs):
        """List periods; surface filter validation errors as HTTP 400."""
        try:
            return super().list(request, *args, **kwargs)
        except ValidationError as e:
            raise ParseError(e)

    def get_queryset(self):
        """Superusers see everything; other users only their own appliances."""
        qs = super().get_queryset()
        user = self.request.user
        if user.is_superuser:
            return qs
        return qs.filter(appliance__residential_home__dno_customer_id=user)
class EnergyProductionPeriodSchema(JSONSchemaViewSet):
    # Serves the JSON schema for energy production periods (homes-v1 app).
    schema_for = serializers.EnergyProductionPeriodSerializer
    app_name = 'homes-v1'
class MainMeterViewSet(viewsets.ModelViewSet):
    """
    Data format described in
    `/schemas/homes/main_meters/list`
    `/schemas/homes/main_meters/detail/` and
    `/schemas/homes/main_meters/update`
    Main meters may be filtered on `residential_home`.
    """
    model = models.MainMeter
    serializer_class = serializers.MainMeterSerializer
    filter_class = filters.MainMeterFilter
    # BUGFIX: ('residential_home') is a string, not a tuple -- the trailing
    # comma is required so DRF sees one field name.
    filter_fields = ('residential_home',)

    def list(self, request, *args, **kwargs):
        """List main meters; surface filter validation errors as HTTP 400."""
        try:
            return super().list(request, *args, **kwargs)
        except ValidationError as e:
            raise ParseError(e)

    def get_queryset(self):
        """Superusers see everything; other users only their own homes."""
        qs = super().get_queryset()
        user = self.request.user
        if user.is_superuser:
            return qs
        return qs.filter(residential_home__dno_customer_id=user)
class MainMeterSchema(JSONSchemaViewSet):
    # Serves the JSON schema for main meters (homes-v1 app).
    schema_for = serializers.MainMeterSerializer
    app_name = 'homes-v1'
class SubMeterViewSet(viewsets.ModelViewSet, BulkCreateModelMixin):
    """
    Data format described in
    `/schemas/homes/sub_meters/list/`
    `/schemas/homes/sub_meters/detail/` and
    `/schemas/homes/sub_meters/update/`.
    Submeters may be filtered on `residential_home`.
    Bulk creation possible at `/homes/sub_meters/bulk/`
    (post JSON array of objects to create).
    """
    model = models.SubMeter
    serializer_class = serializers.SubMeterSerializer
    filter_class = filters.SubMeterFilter
    # BUGFIX: ('residential_home') is a string, not a tuple -- the trailing
    # comma is required so DRF sees one field name.
    filter_fields = ('residential_home',)

    def list(self, request, *args, **kwargs):
        """List submeters; surface filter validation errors as HTTP 400."""
        try:
            return super().list(request, *args, **kwargs)
        except ValidationError as e:
            raise ParseError(e)

    def get_queryset(self):
        """Superusers see everything; other users only their own homes."""
        qs = super().get_queryset()
        user = self.request.user
        if user.is_superuser:
            return qs
        return qs.filter(residential_home__dno_customer_id=user)
class SubMeterSchema(JSONSchemaViewSet):
    # Serves the JSON schema for submeters (homes-v1 app).
    schema_for = serializers.SubMeterSerializer
    app_name = 'homes-v1'
class MeterPortViewSet(viewsets.ModelViewSet, BulkCreateModelMixin):
    """
    Data format described in
    `/schemas/homes/meter_ports/list/`
    `/schemas/homes/meter_ports/detail/` and
    `/schemas/homes/meter_ports/update/`
    Meter ports may be filtered on `mainmeter`, `submeter`,
    `resource_type`, `unit`.
    Configuration of a meter port can be obtained at
    `/homes/meter_ports/{id}/`,
    Bulk creation possible at `/homes/meter_ports/bulk/`
    (post JSON array of objects to create).
    """
    model = models.MeterPort
    serializer_class = serializers.MeterPortSerializer
    filter_class = filters.MeterPortFilter
    def list(self, request, *args, **kwargs):
        # Surface filter validation errors as HTTP 400 rather than 500.
        try:
            return super().list(request, *args, **kwargs)
        except ValidationError as e:
            raise ParseError(e)
    def get_queryset(self):
        # Supports ?submeter=null / ?mainmeter=null style queries by
        # translating them into __isnull lookups.
        qs = super().get_queryset()
        user = self.request.user
        null_defs = ['None', 'none', 'Null', 'null', 'NULL']
        meters = ['submeter', 'mainmeter']
        myfilter = {"{}__isnull".format(key): True
                    for key, val in self.request.GET.items()
                    if key in meters and val in null_defs}
        if len(myfilter) > 0:
            # NOTE(review): this early return skips the per-user filtering
            # below, so any authenticated user can list all unattached meter
            # ports -- confirm this is intended.
            return models.MeterPort.objects.filter(**myfilter)
        if user.is_superuser:
            return qs
        else:
            return (
                qs.filter(mainmeter__residential_home__dno_customer_id=user) |
                qs.filter(submeter__residential_home__dno_customer_id=user)
            )
    def create(self, request):
        """
        Ensure that only one flow is associated to one submeter or main meter
        is the only meter
        """
        # Presence flags for the mutually-constrained foreign keys.
        submeter_def = bool(request.POST.get('submeter', None))
        mainmeter_def = bool(request.POST.get('mainmeter', None))
        ec_def = bool(request.POST.get('energy_consumption_period', None))
        ep_def = bool(request.POST.get('energy_production_period', None))
        # We cannot associate a meterport to a submeter and a main meter
        if submeter_def and mainmeter_def:
            raise ParseError(
                "Meter port cannot be associated to a submeter and main meter")
        # If main meter is defined then the submeter cannot not be defined
        if mainmeter_def:
            if ec_def or ep_def:
                raise ParseError(
                    'Meter port cannot be associated ' +
                    'to an energy consumption and production period ' +
                    'for a main meter'
                )
        # If submeter defined then ensure only one or none period is defined
        if submeter_def:
            if ec_def and ep_def:
                raise ParseError(
                    "Only a single consumption/production period " +
                    "can be defined for submeters"
                )
        # Serialize the data and check if it is valid
        serializer = serializers.MeterPortSerializer(
            data=request.DATA, context={'request': request}
        )
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    @link()
    def get_measurements(self, request, pk=None):
        # Return the raw measurements recorded for this meter port.
        return response_measurements(request, meter_port_pk=pk)
class MeterPortSchema(JSONSchemaViewSet):
    # Serves the JSON schema for meter ports (homes-v1 app).
    schema_for = serializers.MeterPortSerializer
    app_name = 'homes-v1'
class FixedValueMeterPortViewSet(viewsets.ModelViewSet, BulkCreateModelMixin):
    """
    Data format described in
    `/schemas/homes/fixed_value_meter_ports/list/`
    `/schemas/homes/fixed_value_meter_ports/detail/` and
    `/schemas/homes/fixed_value_meter_ports/update/`.
    Configuration of a meter port can be obtained at
    `/homes/fixed_value_meter_ports/{id}/`.
    Virtual measurements are obtained at
    `/homes/fixed_value_meter_ports/{id}/get_measurements/?from_timestamp={tf}&to_timestamp={tt}`.
    Bulk creation possible at `/homes/fixed_value_meter_ports/bulk/`
    (post JSON array of objects to create).
    """
    # NOTE(review): this class attribute binds the User model class itself
    # and appears unused by the methods below -- confirm it can be removed.
    user = User
    model = models.FixedValueMeterPort
    serializer_class = serializers.FixedValueMeterPortSerializer
    def list(self, request, *args, **kwargs):
        # Surface filter validation errors as HTTP 400 rather than 500.
        try:
            return super().list(request, *args, **kwargs)
        except ValidationError as e:
            raise ParseError(e)
    def get_queryset(self):
        # Non-superusers may only see their own fixed-value meter ports.
        qs = super().get_queryset()
        user = self.request.user
        if user.is_superuser:
            return qs
        else:
            return qs.filter(user=user)
    @link()
    def get_measurements(self, request, pk=None):
        # Generate the virtual (fixed-value) measurements for the window
        # given by the from_timestamp/to_timestamp query parameters.
        return response_fixed_value_measurements(request, pk)
class FixedValueMeterPortSchema(JSONSchemaViewSet):
    # NOTE(review): schema_for points at MeterPortSerializer, unlike the
    # viewset above which uses FixedValueMeterPortSerializer -- this looks
    # like a copy-paste slip; confirm which serializer the schema should use.
    schema_for = serializers.MeterPortSerializer
    app_name = 'homes-v1'
class VirtualEnergyPortViewSet(viewsets.ModelViewSet):
    """
    Data format described in
    `/schemas/homes/virtual_energy_ports/list/`
    `/schemas/homes/virtual_energy_ports/detail/` and
    `/schemas/homes/virtual_energy_ports/update/`
    A virtual energy port consists of four meterports that implicit represents
    a energy consumption port; a main meter consumption meter port, a current
    meterport, a voltage meterport and a power_factor meter port.
    An aligned energy measurement set consists of a tuple of an accumulated
    consumption start value, an accumulated stop value, an average current
    value, an average voltage, and an average power factor within the time
    period. The from_timestamp and to_timestamp indicate the time interval of
    energy measurement. It can be obtained from
    `/homes/virtual_energy_ports/{meter_port_id}/get_measurements/?from_timestamp={tf}&to_timestamp={tt}`
    """
    model = models.VirtualEnergyPort
    serializer_class = serializers.VirtualEnergyPortSerializer
    def list(self, request, *args, **kwargs):
        # Surface filter validation errors as HTTP 400 rather than 500.
        try:
            return super().list(request, *args, **kwargs)
        except ValidationError as e:
            raise ParseError(e)
    def get_queryset(self):
        # Non-superusers only see ports attached to meters of their homes.
        qs = super().get_queryset()
        user = self.request.user
        if user.is_superuser:
            return qs
        else:
            return (
                qs.filter(mainmeter__residential_home__dno_customer_id=user) |
                qs.filter(submeter__residential_home__dno_customer_id=user)
            )
    @link()
    def get_measurements(self, request, pk=None):
        # Return aligned virtual energy measurements for the query window.
        return response_virtual_energy_port_measurements(request, pk)
class VirtualEnergyPortSchema(JSONSchemaViewSet):
    # Serves the JSON schema for virtual energy ports (homes-v1 app).
    schema_for = serializers.VirtualEnergyPortSerializer
    app_name = 'homes-v1'
class MeasurementViewSet(viewsets.ModelViewSet, BulkCreateModelMixin):
    """
    Data format described in `/schemas/homes/measurements/list/`,
    `/schemas/homes/measurements/detail/` and
    `/schemas/homes/measurements/update/`.
    Measurements may be filtered on `meter_port`,
    `min_value`,`max_value`,`min_timestamp` and `max_timestamp`.
    Condensed stored data for each input can be obtained at
    `/homes/measurements/{meter_port_id}/hourly_condensed/`,
    `/homes/measurements/{meter_port_id}/daily_condensed/`,
    `/homes/measurements/{meter_port_id}/monthly_condensed/` and
    `/homes/measurements/{meter_port_id}/yearly_condensed/`. For condensed
    data, query parameters `from_timestamp` and `to_timestamp` must be
    provided. The data format for condensed data is described in
    `/static/condensed-list.json`.
    Latest measurements can be viewed with `/homes/measurements/latest/`.
    Bulk creation possible at `/homes/measurements/bulk/`
    (post JSON array of objects to create).
    """
    throttle_scope = 'measurements'
    model = models.Measurement
    serializer_class = serializers.MeasurementSerializer
    filter_class = filters.MeasurementFilter
    filter_fields = ('meter_port', 'timestamp', 'value')

    def list(self, request, *args, **kwargs):
        """List measurements; surface filter validation errors as HTTP 400."""
        try:
            return super().list(request, *args, **kwargs)
        except ValidationError as e:
            raise ParseError(e)

    def get_queryset(self):
        """Superusers see everything; other users only their homes' data."""
        qs = super().get_queryset()
        user = self.request.user
        if user.is_superuser:
            return qs
        return (
            qs.filter(meter_port__mainmeter__residential_home__dno_customer_id=user) |  # noqa
            qs.filter(meter_port__submeter__residential_home__dno_customer_id=user)  # noqa
        )

    @list_route()
    def latest(self, request):
        """Return the most recent measurements, newest first (paginated)."""
        # BUGFIX: previously queried Measurement.objects.all(), bypassing the
        # per-user filtering in get_queryset() and exposing other customers'
        # measurements to non-superusers.
        latest_measurements = self.get_queryset().order_by('-timestamp')
        page = self.paginate_queryset(latest_measurements)
        serializer = self.get_pagination_serializer(page)
        return Response(serializer.data)

    @link()
    def hourly_condensed(self, request, pk=None):
        """Condensed data at one-hour resolution."""
        return condensed(request, pk, datetime.timedelta(hours=1))

    @link()
    def daily_condensed(self, request, pk=None):
        """Condensed data at one-day resolution."""
        return condensed(request, pk, datetime.timedelta(days=1))

    @link()
    def monthly_condensed(self, request, pk=None):
        """Condensed data at one-month (calendar) resolution."""
        return condensed(request, pk, relativedelta(months=1))

    @link()
    def yearly_condensed(self, request, pk=None):
        """Condensed data at one-year (calendar) resolution."""
        return condensed(request, pk, relativedelta(years=1))
class MeasurementSchema(JSONSchemaViewSet):
    # Serves the JSON schema for measurements (homes-v1 app).
    schema_for = serializers.MeasurementSerializer
    app_name = 'homes-v1'
class ResidentialHomeViewSet(viewsets.ModelViewSet):
"""
Data format described in `/schemas/homes/residential_homes/list`
`/schemas/homes/residential_homes/detail/` and
`/schemas/homes/residential_homes/update`.
Electrical energy consumption and production for the residential home can
be obtained at
`/homes/residential_homes/{home_id}/get_energy_{consumption,production}/
?from_timestamp={tf}&to_timestamp={tt}[&tau={tau}]`, where `{home_id}` is
the id of the residential home, `{tf}` and `{tt}` represent the
timeslot. `{tau}` is optional and can have following values 1min, 5min,
10min, 15min, 20min, 30min, hourly, daily, weekly, monthly, yearly.
Consumption data is based on main meter data and production data is based
on submeters.
Get status of the residential | |
role == "Administrator" or role == "admin" or role == "Agent" or role == "agent":
isAllowed = True
botlog.LogSymphonyInfo("Role of the calling user: " + role)
else:
isAllowed = False
botlog.LogSymphonyInfo("The calling user is a Zendesk " + role)
#################################################
botlog.LogSymphonyInfo(firstName + " " + lastName + " (" + displayName + ") from Company/Pod name: " + str(companyName) + " with UID: " + str(userID))
callerCheck = (firstName + " " + lastName + " - " + displayName + " - " + companyName + " - " + str(userID))
if callerCheck in AccessFile and isAllowed:
streamType = (messageDetail.ChatRoom.Type)
#print(streamType)
botlog.LogSymphonyInfo("User is part of the Agent list and is an Admin or Agent on Zendesk")
caller_raw = messageDetail.Sender.Name
caller_split = str(caller_raw).split(" ")
caller = caller_split[0]
# Parse the input
query = ""
results = ""
isIM = ""
message = (messageDetail.Command.MessageText)
message_split = message.split("|")
try:
userSplit = message_split[0]
userEntered = True
except:
userSplit = ""
userEntered = False
try:
organization = str(message_split[1])
orgEntered = True
except:
organization = ""
orgEntered = False
####################################
headers = {
'username': _configDef['zdesk_config']['zdesk_email'] + "/token",
'password': _configDef['zdesk_config']['zdesk_password'],
'authorization': _configDef['zdesk_config']['zdesk_auth'],
'cache-control': "no-cache",
'Content-Type': 'application/json',
}
url = _configDef['zdesk_config']['zdesk_url'] + "/api/v2/search"
# User search only
if userEntered and orgEntered == False:
#print("inside user and not org")
org_legnth = len(str(organization[1:]))
#print(int(org_legnth))
if org_legnth == "" or int(org_legnth) < 2:
return messageDetail.ReplyToChat("You did not enter a valid organization, please check and try again")
query += "type:user\"" + str(userSplit[1:]) + "\""
querystring = {"query": "type:user " + str(userSplit[1:]) + ""}
#print(querystring)
response = requests.request("GET", str(url), headers=headers, params=querystring)
data = response.json()
#print(str(data))
dataLenght = len(str(data))
#print(str(dataLenght))
# if dataLenght >= 50000:
# return messageDetail.ReplyToChat("There are few results for this user search, please try with the full name or/and with the company name")
noUserFound = "{'results': [], 'facets': None, 'next_page': None, 'previous_page': None, 'count': 0}"
if str(data).startswith(noUserFound):
return messageDetail.ReplyToChat("There is no user with this information")
if query == "type:user\"\"":
return messageDetail.ReplyToChat("You have searched for all users on your Zendesk, I will ignore this request to avoid any performance issue")
else:
botlog.LogSymphonyInfo("Getting user information from Zendesk")
messageDetail.ReplyToChat("Getting user information from Zendesk, please wait")
# User and Organisation search
if userEntered and orgEntered:
#print("inside user and org")
org_legnth = len(str(organization[1:]))
#print(int(org_legnth))
if org_legnth == "" or int(org_legnth) < 2:
return messageDetail.ReplyToChat("You did not enter a valid organization, please check and try again")
query += "type:user" + str(userSplit) + " organization:" + str(organization[1:])
botlog.LogSymphonyInfo("Query used " + str(query))
querystring = {"query": "type:user" + str(userSplit) + " organization:" + str(organization[1:]) + ""}
#botlog.LogSymphonyInfo("Entire query used " + str(querystring))
if str(query).startswith("type:user organization:"):
messageDetail.ReplyToChat("Please check your 1:1 IM with me to see the full list of users from " + str(organization[1:]))
messageDetail.ReplyToSenderv2("Hi " + str(caller) + ", Loading all users from Zendesk under organization <b>" + str(organization[1:]) + "</b>, please wait")
isIM = True
response = requests.request("GET", url, headers=headers, params=querystring)
data = response.json()
#print(str(data))
noRes = "{'results': [], 'facets': {'type': {'entry': 0, 'ticket': 0, 'organization': 0, 'user': 0, 'article': 0, 'group': 0}}, 'next_page': None, 'previous_page': None, 'count': 0}"
if str(data) == str(noRes):
return messageDetail.ReplyToChat("There is no result for this search: " + str(query))
if str(data).startswith("{'results': [], 'facets': None, 'next_page': None, 'previous_page': None, 'count': 0}"):
return messageDetail.ReplyToChat("This user does not exist on Zendesk, name is misspelled or does not belong to this organisation")
elif str(data).startswith("{'results': [], 'facets': {'type': {'entry': 0, 'ticket': 0, 'organization': 0, 'user': 0, 'article': 0, 'group': 0}}, 'next_page': None, 'previous_page': None, 'count': 0}"):
return messageDetail.ReplyToChat("This organisation/company does not exist in Zendesk or is misspelled")
else:
messageDetail.ReplyToChat("Getting user information from Zendesk, please wait")
table_body = ""
table_header = "<table style='border-collapse:collapse;border:2px solid black;table-layout:auto;width:100%;box-shadow: 5px 5px'><thead><tr style='background-color:#4D94FF;color:#ffffff;font-size:1rem' class=\"tempo-text-color--white tempo-bg-color--black\">" \
"<td style='width:25%;border:1px solid blue;border-bottom: double blue;text-align:center'>NAME</td>" \
"<td style='width:25%;border:1px solid blue;border-bottom: double blue;text-align:center'>EMAIL ADDRESS</td>" \
"<td style='width:25%;border:1px solid blue;border-bottom: double blue;text-align:center'>COMPANY</td>" \
"<td style='width:25%;border:1px solid blue;border-bottom: double blue;text-align:center'>ROLE</td>" \
"</tr></thead><tbody>"
for result in data['results']:
name = str(result["name"])
zdID = str(result["id"])
###############################
conn = http.client.HTTPSConnection(_configDef['zdesk_config']['zdesk_api'])
headers = {
'username': _configDef['zdesk_config']['zdesk_email'] + "/token",
'password': _configDef['zdesk_config']['zdesk_password'],
'authorization': _configDef['zdesk_config']['zdesk_auth'],
'cache-control': "no-cache",
'Content-Type': 'application/json',
}
# To get the name of the requester given the requesterID
conn.request("GET", "/api/v2/users/" + str(zdID) + "/organizations", headers=headers)
res = conn.getresponse()
organizationsID = res.read()
tempOrganizationsID = str(organizationsID.decode('utf-8'))
noOrgUser = '{"organizations":[],"next_page":null,"previous_page":null,"count":0}'
if tempOrganizationsID == noOrgUser:
noOrgUserFlag = True
else:
noOrgUserFlag = False
data_dict = json.loads(str(tempOrganizationsID))
data = json.dumps(data_dict, indent=2)
# data_dict = ast.literal_eval(data)
d_req = json.loads(data)
# data = json.dumps(tempOrganizationsID, indent=2)
# data_dict = ast.literal_eval(data)
# d_req = json.loads(str(data_dict))
try:
org_Name = str(d_req["organizations"][0]["name"])
org_name_temp = str(org_Name).replace("<", "<").replace("\"", """).replace("&","&").replace("'", "'").replace(">", ">")
org_Name = str(org_name_temp)
except:
org_Name = "None"
###############################
comData = org_Name
email = str(result["email"])
try:
organization_id = str(result["organization_id"])
except:
organization_id = "None"
userZRole = str(result["role"])
orglink = (_configDef['zdesk_config']['zdesk_org']) + str(organization_id) + "/tickets"
user_link = (_configDef['zdesk_config']['zdesk_user']) + str(zdID) + "/requested_tickets"
if noOrgUserFlag:
table_body += "<tr>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + str(user_link) + "\">" + str(name) + "</a></td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"mailto:" + str(email) + "?Subject=Symphony%20Communication\">" + str(email) + "</a></td>" \
"<td style='border:1px solid black;text-align:center'>None</td>" \
"<td style='border:1px solid black;text-align:center'>" + str(userZRole) + "</td>" \
"</tr>"
else:
table_body += "<tr>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + str(user_link) + "\">" + str(name) + "</a></td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"mailto:" + str(email) + "?Subject=Symphony%20Communication\">" + str(email) + "</a></td>" \
"<td style='border:1px solid black;text-align:center'><a href=\"" + str(orglink) + "\">" + str(comData) + "</a></td>" \
"<td style='border:1px solid black;text-align:center'>" + str(userZRole) + "</td>" \
"</tr>"
table_body += "</tbody></table>"
reply = table_header + table_body
if isIM:
#return messageDetail.ReplyToSenderv2_noBotLog("<card iconSrc=\"https://thumb.ibb.co/csXBgU/Symphony2018_App_Icon_Mobile.png\" accent=\"tempo-bg-color--blue\"><header>These are all the users under the organisation <b>" + organization[1:] + "</b></header><body>" + reply + "</body></card>")
return messageDetail.ReplyToSenderv2_noBotLog("<card iconSrc=\"\" accent=\"tempo-bg-color--blue\"><header>These are all the users under the organisation <b>" + organization[1:] + "</b></header><body>" + reply + "</body></card>")
else:
#return messageDetail.ReplyToChatV2_noBotLog("<card iconSrc =\"https://thumb.ibb.co/csXBgU/Symphony2018_App_Icon_Mobile.png\" accent=\"tempo-bg-color--blue\"><header>Please find the result below</header><body>" + reply + "</body></card>")
return messageDetail.ReplyToChatV2_noBotLog("<card iconSrc =\"\" accent=\"tempo-bg-color--blue\"><header>Please find the result below</header><body>" + reply + "</body></card>")
else:
return messageDetail.ReplyToChat("You aren't authorised to use this command. You are either not Added to the Bot as an Agent or you are not an Agent/Staff on Zendesk")
except:
return messageDetail.ReplyToChat("I am sorry, I was working on a different task, can you please retry")
####################################
######## Recent Tickets ##########
####################################
def recentZD(messageDetail):
botlog.LogSymphonyInfo("#################################################")
botlog.LogSymphonyInfo("Bot Call: Recent Zendesk Tickets raised or viewed")
botlog.LogSymphonyInfo("#################################################")
try:
table_bodyFull = ""
table_header = ""
allTicket = ""
isAllowed = False
counter = True
commandCallerUID = messageDetail.FromUserId
try:
connComp.request("GET", "/pod/v3/users?uid=" + commandCallerUID, headers=headersCompany)
except:
return messageDetail.ReplyToChat("I am having difficulty to find this user id: " + commandCallerUID)
resComp = connComp.getresponse()
dataComp = resComp.read()
data_raw = str(dataComp.decode('utf-8'))
# data_dict = ast.literal_eval(data_raw)
data_dict = json.loads(str(data_raw))
dataRender = json.dumps(data_dict, indent=2)
d_org = json.loads(dataRender)
for index_org in range(len(d_org["users"])):
firstName = d_org["users"][index_org]["firstName"]
lastName = d_org["users"][index_org]["lastName"]
displayName = d_org["users"][index_org]["displayName"]
#companyName = d_org["users"][index_org]["company"]
companyNameTemp = d_org["users"][index_org]["company"]
companyTemp = str(companyNameTemp).replace("&", "&").replace("<", "<").replace('"', """).replace("'", "'").replace(">", ">")
companyName = str(companyTemp)
userID = str(d_org["users"][index_org]["id"])
#################################################
try:
emailAddress = str(d_org["users"][index_org]["emailAddress"])
#print("User is connected: " + emailAddress)
emailZendesk = emailAddress
connectionRequired = False
except:
connectionRequired = True
# if connectionRequired:
data_lenght = len(dataComp)
if data_lenght > 450:
try:
#print("inside > 450")
query = "type:user " + emailAddress
except:
query = "type:user " + firstName + " " + lastName
botlog.LogSymphonyInfo(query)
elif data_lenght < 450:
try:
#print("inside < 450")
#query = "type:user " + emailAddress + " organization:" + companyName
query = "type:user " + emailAddress
except:
#query = "type:user " + firstName + " " + lastName + " organization:" + companyName
query = "type:user " + firstName + " " + lastName
botlog.LogSymphonyInfo(query)
else:
return messageDetail.ReplyToChat("No user information available")
botlog.LogSymphonyInfo(query)
results = zendesk.search(query=query)
#print(results)
if str(results).startswith(
"{'results': [], 'facets': None, 'next_page': None, 'previous_page': None, 'count': 0}"):
return messageDetail.ReplyToChat(
"This user does not exist on Zendesk, the name is misspelled or does not belong to this organisation.")
elif str(results).startswith(
"{'results': [], 'facets': {'type': {'entry': 0, 'ticket': 0, 'organization': 0, | |
import io
from atws.wrapper import Wrapper
from django.core.management import call_command
from django.test import TestCase
from djautotask.tests import fixtures, mocks, fixture_utils
from djautotask import models
def sync_summary(class_name, created_count, updated_count=0):
    """Build the expected one-line summary for a partial (non-full) sync."""
    return (
        f'{class_name} Sync Summary - '
        f'Created: {created_count}, Updated: {updated_count}, Skipped: 0'
    )
def full_sync_summary(class_name, deleted_count, updated_count=0):
    """Build the expected one-line summary for a ``--full`` sync."""
    return (
        f'{class_name} Sync Summary - Created: 0, '
        f'Updated: {updated_count}, Skipped: 0, Deleted: {deleted_count}'
    )
def slug_to_title(slug):
    """Turn a command slug like ``ticket_note`` into ``Ticket Note``."""
    return slug.replace('_', ' ').title()
def run_sync_command(full_option=False, command_name=None):
    """Invoke the ``atsync`` management command and capture its stdout.

    ``command_name`` limits the sync to one entity; ``full_option`` adds
    the ``--full`` flag. Returns the ``StringIO`` holding the output.
    """
    captured = io.StringIO()
    cli_args = ['atsync']
    if command_name:
        cli_args.append(command_name)
    if full_option:
        cli_args.append('--full')
    call_command(*cli_args, stdout=captured)
    return captured
class AbstractBaseSyncRestTest(object):
    """Shared machinery for sync-command tests backed by the REST API.

    Subclasses set ``args`` to ``(mock_call, fixture, at_object)``.
    """
    def _test_sync(self, mock_call, return_value, at_object,
                   full_option=False):
        # Install the canned API response, then run ``atsync <at_object>``.
        mock_call(return_value)
        captured = io.StringIO()
        cli_args = ['atsync', at_object]
        if full_option:
            cli_args.append('--full')
        call_command(*cli_args, stdout=captured)
        return captured
    def _title_for_at_object(self, at_object):
        # 'ticket_note' -> 'Ticket Note'
        return at_object.replace('_', ' ').title()
    def test_sync(self):
        output = self._test_sync(*self.args)
        self.assertIn(self._title_for_at_object(self.args[-1]),
                      output.getvalue().strip())
    def test_full_sync(self):
        # Seed records with a regular sync so the full sync has rows
        # to delete, then replay with an empty API page.
        self.test_sync()
        mock_call, return_value, at_object = self.args
        empty_page = {
            "items": [],
            "pageDetails": fixtures.API_PAGE_DETAILS
        }
        output = self._test_sync(mock_call, empty_page, at_object,
                                 full_option=True)
        expected = (
            '{} Sync Summary - Created: 0, Updated: 0, Skipped: 0, '
            'Deleted: {}'.format(self._title_for_at_object(at_object),
                                 len(return_value.get('items')))
        )
        self.assertEqual(expected, output.getvalue().strip())
class PicklistSyncTest(AbstractBaseSyncRestTest):
    """REST sync-command tests for picklist entities (``fields`` payload)."""
    def test_full_sync(self):
        # Seed with a regular sync first so --full has rows to delete.
        self.test_sync()
        mock_call, return_value, at_object = self.args
        empty_fields = {
            "fields": []
        }
        output = self._test_sync(mock_call, empty_fields, at_object,
                                 full_option=True)
        deleted = len(return_value.get('fields')[0].get('picklistValues'))
        expected = (
            '{} Sync Summary - Created: 0, Updated: 0, Skipped: 0, '
            'Deleted: {}'.format(self._title_for_at_object(at_object),
                                 deleted)
        )
        self.assertEqual(expected, output.getvalue().strip())
class TestSyncContactCommand(AbstractBaseSyncRestTest, TestCase):
    """Sync contacts via ``atsync contact`` (REST API)."""
    # (API mock, canned fixture payload, command slug)
    args = (
        mocks.service_api_get_contacts_call,
        fixtures.API_CONTACT,
        'contact',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_contacts()
class AbstractBaseSyncTest(object):
    """Shared machinery for sync-command tests backed by the SOAP API.

    Subclasses set ``args`` to ``(fixture_list, at_object)``.
    """
    def setUp(self):
        mocks.init_api_connection(Wrapper)
        mocks.init_api_rest_connection()
    def _title_for_at_object(self, at_object):
        # 'ticket_note' -> 'Ticket Note'
        return at_object.replace('_', ' ').title()
    def get_api_mock(self):
        return mocks.api_query_call
    def get_return_value(self, at_object, fixture_list):
        # 'ticket_note' -> 'TicketNote', the generated object class name.
        object_name = at_object.title().replace('_', '')
        return fixture_utils.generate_objects(object_name, fixture_list)
    def init_sync_command(self, fixture_list, at_object, full_option=False):
        """Mock the query API with the fixtures and run the command."""
        mocked = self.get_return_value(at_object, fixture_list)
        self.get_api_mock()(mocked)
        return run_sync_command(full_option, at_object)
    def _test_sync(self):
        # NOTE(review): leading underscore means the test runner never
        # collects this -- possibly intended to be named ``test_sync``.
        out = self.init_sync_command(*self.args)
        self.assertIn(self._title_for_at_object(self.args[-1]),
                      out.getvalue().strip())
    def test_full_sync(self):
        # Seed records with a regular sync, then replay with no fixtures
        # so every previously synced row is deleted.
        self.init_sync_command(*self.args)
        fixture_list, at_object = self.args
        out = self.init_sync_command([], at_object, full_option=True)
        expected = (
            '{} Sync Summary - Created: 0, Updated: 0, Skipped: 0, '
            'Deleted: {}'.format(self._title_for_at_object(at_object),
                                 len(fixture_list))
        )
        self.assertEqual(expected, out.getvalue().strip())
class AbstractPicklistSyncCommandTest(AbstractBaseSyncTest):
    """SOAP picklist variant: mocks the field-info API instead of queries.

    Subclasses must define ``field_name``.
    """
    def get_return_value(self, at_object, fixture_list):
        # Picklist fixtures are keyed by field name, not entity name.
        return fixture_utils.generate_picklist_objects(
            self.field_name, fixture_list)
    def get_api_mock(self):
        return mocks.api_picklist_call
class TestSyncTicketCommand(AbstractBaseSyncRestTest, TestCase):
    """Sync tickets via ``atsync ticket`` (REST API)."""
    args = (
        mocks.service_api_get_tickets_call,
        fixtures.API_TICKET,
        'ticket',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_tickets()
class TestSyncStatusCommand(PicklistSyncTest, TestCase):
    """Sync the ticket Status picklist via ``atsync status``."""
    args = (
        mocks.service_api_get_ticket_picklist_call,
        fixtures.API_STATUS_FIELD,
        'status',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_statuses()
class TestSyncPriorityCommand(PicklistSyncTest, TestCase):
    """Sync the ticket Priority picklist via ``atsync priority``."""
    args = (
        mocks.service_api_get_ticket_picklist_call,
        fixtures.API_PRIORITY_FIELD,
        'priority',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_priorities()
class TestSyncQueueCommand(PicklistSyncTest, TestCase):
    """Sync the ticket Queue picklist via ``atsync queue``."""
    args = (
        mocks.service_api_get_ticket_picklist_call,
        fixtures.API_QUEUE_FIELD,
        'queue',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_queues()
class TestSyncProjectStatusCommand(PicklistSyncTest, TestCase):
    """Sync the ProjectStatus picklist via ``atsync project_status``."""
    args = (
        mocks.service_api_get_project_picklist_call,
        fixtures.API_PROJECT_STATUS_FIELD,
        'project_status',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_project_statuses()
class TestSyncProjectTypeCommand(PicklistSyncTest, TestCase):
    """Sync the ProjectType picklist via ``atsync project_type``."""
    args = (
        mocks.service_api_get_project_picklist_call,
        fixtures.API_PROJECT_TYPE_FIELD,
        'project_type',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_project_types()
class TestSyncSourceCommand(PicklistSyncTest, TestCase):
    """Sync the ticket Source picklist via ``atsync source``."""
    args = (
        mocks.service_api_get_ticket_picklist_call,
        fixtures.API_SOURCE_FIELD,
        'source',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_sources()
class TestSyncIssueTypeCommand(PicklistSyncTest, TestCase):
    """Sync the IssueType picklist via ``atsync issue_type``."""
    args = (
        mocks.service_api_get_ticket_picklist_call,
        fixtures.API_ISSUE_TYPE_FIELD,
        'issue_type',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_issue_types()
class TestSyncSubIssueTypeCommand(PicklistSyncTest, TestCase):
    """Sync the SubIssueType picklist; parent issue types are seeded first."""
    args = (
        mocks.service_api_get_ticket_picklist_call,
        fixtures.API_SUB_ISSUE_TYPE_FIELD,
        'sub_issue_type',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_issue_types()
        fixture_utils.init_sub_issue_types()
class TestSyncTicketTypeCommand(PicklistSyncTest, TestCase):
    """Sync the TicketType picklist via ``atsync ticket_type``."""
    args = (
        mocks.service_api_get_ticket_picklist_call,
        fixtures.API_TICKET_TYPE_FIELD,
        'ticket_type',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_ticket_types()
class TestSyncAccountTypeCommand(PicklistSyncTest, TestCase):
    """Sync the AccountType picklist via ``atsync account_type``."""
    args = (
        mocks.service_api_get_account_types_call,
        fixtures.API_ACCOUNT_TYPE_FIELD,
        'account_type',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_account_types()
class TestSyncServiceCallStatusCommand(PicklistSyncTest,
                                       TestCase):
    """Sync the ServiceCallStatus picklist via ``atsync service_call_status``."""
    args = (
        mocks.service_api_get_service_call_statuses_call,
        fixtures.API_SERVICE_CALL_STATUS_FIELD,
        'service_call_status',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_service_call_statuses()
class TestSyncDisplayColorCommand(PicklistSyncTest, TestCase):
    """Sync the DisplayColor picklist (ticket categories) via ``atsync display_color``."""
    args = (
        mocks.service_api_get_ticket_category_picklist_call,
        fixtures.API_DISPLAY_COLOR_FIELD,
        'display_color',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_display_colors()
class TestSyncLicenseTypeCommand(PicklistSyncTest, TestCase):
    """Sync the LicenseType picklist via ``atsync license_type``."""
    args = (
        mocks.service_api_get_license_types_call,
        fixtures.API_LICENSE_TYPE_FIELD,
        'license_type',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_license_types()
class TestSyncTaskTypeLinkCommand(PicklistSyncTest, TestCase):
    """Sync the TaskTypeLink picklist via ``atsync task_type_link``."""
    args = (
        mocks.service_api_get_task_type_links_call,
        fixtures.API_TASK_TYPE_LINK_FIELD,
        'task_type_link',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_task_type_links()
class TestSyncUseTypeCommand(PicklistSyncTest, TestCase):
    """Sync the UseType picklist via ``atsync use_type``."""
    args = (
        mocks.service_api_get_use_types_call,
        fixtures.API_USE_TYPE_FIELD,
        'use_type',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_use_types()
class TestSyncTicketCategoryCommand(AbstractBaseSyncRestTest, TestCase):
    """Sync ticket categories via ``atsync ticket_category`` (REST API)."""
    args = (
        mocks.service_api_get_ticket_categories_call,
        fixtures.API_TICKET_CATEGORY,
        'ticket_category',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_ticket_categories()
class TestSyncResourceCommand(AbstractBaseSyncTest, TestCase):
    """Sync resources via ``atsync resource`` (SOAP API)."""
    args = (
        fixtures.API_RESOURCE_LIST,
        'resource',
    )
class TestSyncTicketSecondaryResourceCommand(AbstractBaseSyncTest, TestCase):
    """Sync ticket secondary resources (SOAP API)."""
    args = (
        fixtures.API_SECONDARY_RESOURCE_LIST,
        'ticket_secondary_resource',
    )
class TestSyncAccountCommand(AbstractBaseSyncTest, TestCase):
    """Sync accounts via ``atsync account`` (SOAP API)."""
    args = (
        fixtures.API_ACCOUNT_LIST,
        'account',
    )
class TestSyncAccountLocationCommand(AbstractBaseSyncRestTest, TestCase):
    """Sync account physical locations (REST API); accounts are seeded first."""
    args = (
        mocks.service_api_get_account_physical_locations_call,
        fixtures.API_ACCOUNT_PHYSICAL_LOCATION,
        'account_physical_location',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_accounts()
        fixture_utils.init_account_physical_locations()
class TestSyncProjectCommand(AbstractBaseSyncRestTest, TestCase):
    """Sync projects via ``atsync project`` (REST API)."""
    args = (
        mocks.service_api_get_projects_call,
        fixtures.API_PROJECT,
        'project',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_projects()
class TestSyncPhaseCommand(AbstractBaseSyncTest, TestCase):
    """Sync phases via ``atsync phase`` (SOAP API)."""
    args = (
        fixtures.API_PHASE_LIST,
        'phase',
    )
class TestSyncTaskCommand(AbstractBaseSyncRestTest, TestCase):
    """Sync tasks (REST API); parent projects are seeded first."""
    args = (
        mocks.service_api_get_tasks_call,
        fixtures.API_TASK,
        'task',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_projects()
        fixture_utils.init_tasks()
class TestSyncTaskSecondaryResourceCommand(AbstractBaseSyncTest, TestCase):
    """Sync task secondary resources (SOAP API)."""
    args = (
        fixtures.API_TASK_SECONDARY_RESOURCE_LIST,
        'task_secondary_resource',
    )
class TestSyncTicketNoteCommand(AbstractBaseSyncTest, TestCase):
    """Sync ticket notes (SOAP API); parent tickets are seeded first."""
    args = (
        fixtures.API_TICKET_NOTE_LIST,
        'ticket_note',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_tickets()
        fixture_utils.init_ticket_notes()
class TestSyncTaskNoteCommand(AbstractBaseSyncTest, TestCase):
    """Sync task notes (SOAP API); projects and tasks are seeded first."""
    args = (
        fixtures.API_TASK_NOTE_LIST,
        'task_note',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_projects()
        fixture_utils.init_tasks()
        fixture_utils.init_task_notes()
class TestSyncTimeEntryCommand(AbstractBaseSyncTest, TestCase):
    """Sync time entries (SOAP API); parent tickets are seeded first."""
    args = (
        fixtures.API_TIME_ENTRY_LIST,
        'time_entry',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_tickets()
class TestSyncAllocationCodeCommand(AbstractBaseSyncRestTest, TestCase):
    """Sync allocation codes via ``atsync allocation_code`` (REST API)."""
    args = (
        mocks.service_api_get_allocation_codes_call,
        fixtures.API_ALLOCATION_CODE,
        'allocation_code',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_allocation_codes()
class TestSyncRoleCommand(AbstractBaseSyncRestTest, TestCase):
    """Sync roles via ``atsync role`` (REST API)."""
    args = (
        mocks.service_api_get_roles_call,
        fixtures.API_ROLE,
        'role',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_roles()
class TestSyncDepartmentCommand(AbstractBaseSyncRestTest, TestCase):
    """Sync departments via ``atsync department`` (REST API)."""
    args = (
        mocks.service_api_get_departments_call,
        fixtures.API_DEPARTMENT,
        'department',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_departments()
class TestSyncResourceServiceDeskRoleCommand(AbstractBaseSyncRestTest,
                                             TestCase):
    """Sync resource/service-desk-role links; roles and resources seeded first."""
    args = (
        mocks.service_api_get_resource_service_desk_roles_call,
        fixtures.API_RESOURCE_SERVICE_DESK_ROLE,
        'resource_service_desk_role',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_roles()
        fixture_utils.init_resources()
        fixture_utils.init_resource_service_desk_roles()
class TestSyncResourceRoleDepartmentCommand(AbstractBaseSyncRestTest,
                                            TestCase):
    """Sync resource/role/department links; all dependencies seeded first."""
    args = (
        mocks.service_api_get_resource_role_departments_call,
        fixtures.API_RESOURCE_ROLE_DEPARTMENT,
        'resource_role_department',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_departments()
        fixture_utils.init_roles()
        fixture_utils.init_resources()
        fixture_utils.init_resource_role_departments()
class TestSyncContractCommand(AbstractBaseSyncRestTest, TestCase):
    """Sync contracts via ``atsync contract`` (REST API)."""
    args = (
        mocks.service_api_get_contracts_call,
        fixtures.API_CONTRACT,
        'contract',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_contracts()
class TestSyncServiceCallCommand(AbstractBaseSyncRestTest, TestCase):
    """Sync service calls; statuses, resources and accounts seeded first."""
    args = (
        mocks.service_api_get_service_calls_call,
        fixtures.API_SERVICE_CALL,
        'service_call',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_service_call_statuses()
        fixture_utils.init_resources()
        fixture_utils.init_account_types()
        fixture_utils.init_accounts()
class TestSyncServiceCallTicketCommand(AbstractBaseSyncRestTest, TestCase):
    """Sync service-call/ticket links; the full dependency chain is seeded first."""
    args = (
        mocks.service_api_get_service_call_tickets_call,
        fixtures.API_SERVICE_CALL_TICKET,
        'service_call_ticket',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_service_call_statuses()
        fixture_utils.init_resources()
        fixture_utils.init_account_types()
        fixture_utils.init_accounts()
        fixture_utils.init_service_calls()
        fixture_utils.init_statuses()
        fixture_utils.init_tickets()
class TestSyncServiceCallTaskCommand(AbstractBaseSyncRestTest, TestCase):
    """Sync service-call/task links; the full dependency chain is seeded first."""
    args = (
        mocks.service_api_get_service_call_tasks_call,
        fixtures.API_SERVICE_CALL_TASK,
        'service_call_task',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_service_call_statuses()
        fixture_utils.init_account_types()
        fixture_utils.init_accounts()
        fixture_utils.init_service_calls()
        fixture_utils.init_statuses()
        fixture_utils.init_projects()
        fixture_utils.init_tasks()
class TestSyncServiceCallTicketResourceCommand(AbstractBaseSyncRestTest,
                                               TestCase):
    """Sync service-call-ticket resources; the full dependency chain is seeded first."""
    args = (
        mocks.service_api_get_service_call_ticket_resources_call,
        fixtures.API_SERVICE_CALL_TICKET_RESOURCE,
        'service_call_ticket_resource',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_service_call_statuses()
        fixture_utils.init_resources()
        fixture_utils.init_account_types()
        fixture_utils.init_accounts()
        fixture_utils.init_service_calls()
        fixture_utils.init_statuses()
        fixture_utils.init_tickets()
        fixture_utils.init_service_call_tickets()
class TestSyncServiceCallTaskResourceCommand(AbstractBaseSyncRestTest,
                                             TestCase):
    """Sync service-call-task resources; the full dependency chain is seeded first."""
    args = (
        mocks.service_api_get_service_call_task_resources_call,
        fixtures.API_SERVICE_CALL_TASK_RESOURCE,
        'service_call_task_resource',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_service_call_statuses()
        fixture_utils.init_resources()
        fixture_utils.init_account_types()
        fixture_utils.init_accounts()
        fixture_utils.init_service_calls()
        fixture_utils.init_statuses()
        fixture_utils.init_projects()
        fixture_utils.init_tasks()
        fixture_utils.init_service_call_tasks()
class TestSyncTaskPredecessor(AbstractBaseSyncRestTest, TestCase):
    """Sync task predecessors; projects and tasks are seeded first."""
    args = (
        mocks.service_api_get_task_predecessors_call,
        fixtures.API_TASK_PREDECESSOR,
        'task_predecessor',
    )
    def setUp(self):
        super().setUp()
        fixture_utils.init_projects()
        fixture_utils.init_tasks()
        fixture_utils.init_task_predecessors()
class TestSyncAllCommand(TestCase):
def setUp(self):
super().setUp()
mocks.init_api_connection(Wrapper)
mocks.create_mock_call(
'djautotask.sync.TicketNoteSynchronizer._get_query_conditions',
None
)
mocks.create_mock_call(
'djautotask.sync.TaskNoteSynchronizer._get_query_conditions',
None
)
fixture_utils.mock_udfs()
self._call_service_api()
# Mock API calls to return values based on what entity
# is being requested
mocks.get_field_info_api_calls(
fixture_utils.manage_sync_picklist_return_data
)
mocks.wrapper_query_api_calls(
fixture_utils.manage_full_sync_return_data
)
sync_test_cases = [
TestSyncLicenseTypeCommand,
TestSyncTaskTypeLinkCommand,
TestSyncUseTypeCommand,
TestSyncAccountTypeCommand,
TestSyncRoleCommand,
TestSyncDepartmentCommand,
TestSyncTicketCommand,
TestSyncTaskCommand,
TestSyncStatusCommand,
TestSyncResourceCommand,
TestSyncPriorityCommand,
TestSyncQueueCommand,
TestSyncAccountCommand,
TestSyncProjectCommand,
TestSyncProjectStatusCommand,
TestSyncProjectTypeCommand,
TestSyncTicketCategoryCommand,
TestSyncSourceCommand,
TestSyncIssueTypeCommand,
TestSyncSubIssueTypeCommand,
TestSyncTicketTypeCommand,
TestSyncDisplayColorCommand,
TestSyncTaskSecondaryResourceCommand,
TestSyncPhaseCommand,
TestSyncTicketNoteCommand,
TestSyncTaskNoteCommand,
TestSyncTimeEntryCommand,
TestSyncAllocationCodeCommand,
TestSyncResourceRoleDepartmentCommand,
TestSyncResourceServiceDeskRoleCommand,
TestSyncContractCommand,
TestSyncServiceCallStatusCommand,
TestSyncServiceCallCommand,
TestSyncServiceCallTicketCommand,
TestSyncServiceCallTaskCommand,
TestSyncServiceCallTicketResourceCommand,
TestSyncServiceCallTaskResourceCommand,
TestSyncAccountLocationCommand,
TestSyncTaskPredecessor,
TestSyncContactCommand,
]
self.test_args = []
for test_case in sync_test_cases:
# for REST API
if len(test_case.args) == 3:
self.test_args.append(test_case.args)
# for SOAP API
else:
new_test_case = [None, *test_case.args]
self.test_args.append(new_test_case)
def test_partial_sync(self):
"""
Test the command to run a sync of all objects without
the --full argument.
"""
output = run_sync_command()
for mock_call, fixture, at_object in self.test_args:
if mock_call:
if 'fields' in fixture:
fixture_len = \
len(fixture.get('fields')[0].get('picklistValues'))
else:
fixture_len = len(fixture.get('items'))
else:
fixture_len = len(fixture)
summary = sync_summary(slug_to_title(at_object), fixture_len)
self.assertIn(summary, output.getvalue().strip())
self.assertEqual(
models.Ticket.objects.all().count(),
len(fixtures.API_TICKET['items'])
)
def test_full_sync(self):
"""Test the command to run a full sync of all objects."""
at_object_map = {
'account_type': models.AccountType,
'role': models.Role,
'department': models.Department,
'status': models.Status,
'priority': models.Priority,
'queue': models.Queue,
'source': models.Source,
'issue_type': models.IssueType,
'display_color': models.DisplayColor,
'ticket': models.Ticket,
'resource': models.Resource,
'ticket_secondary_resource': models.TicketSecondaryResource,
'account': models.Account,
'account_physical_location': models.AccountPhysicalLocation,
'project': models.Project,
'project_status': models.ProjectStatus,
'project_type': models.ProjectType,
'ticket_category': models.TicketCategory,
'sub_issue_type': models.SubIssueType,
'ticket_type': models.TicketType,
'license_type': models.LicenseType,
'task': models.Task,
'task_secondary_resource': models.TaskSecondaryResource,
'phase': models.Phase,
'ticket_note': models.TicketNote,
'task_note': models.TaskNote,
'time_entry': models.TimeEntry,
'task_type_link': models.TaskTypeLink,
'use_type': models.UseType,
'allocation_code': models.AllocationCode,
'resource_role_department': models.ResourceRoleDepartment,
'resource_service_desk_role': models.ResourceServiceDeskRole,
'contract': models.Contract,
'service_call_status': models.ServiceCallStatus,
'service_call': models.ServiceCall,
'service_call_ticket': models.ServiceCallTicket,
'service_call_task': models.ServiceCallTask,
'service_call_ticket_resource': models.ServiceCallTicketResource,
'service_call_task_resource': models.ServiceCallTaskResource,
'task_predecessor': models.TaskPredecessor,
'contact': models.Contact,
}
run_sync_command()
pre_full_sync_counts = {}
mocks.wrapper_query_api_calls()
mocks.get_field_info_api_calls()
_, _patch = mocks.build_batch_query()
self._call_empty_service_api()
for key, model_class in at_object_map.items():
pre_full_sync_counts[key] = model_class.objects.all().count()
output = run_sync_command(full_option=True)
_patch.stop()
# Verify the rest of sync classes summaries.
for mock_call, fixture, at_object in self.test_args:
if at_object in (
'resource_role_department',
'resource_service_desk_role',
'service_call',
'service_call_ticket',
'service_call_task',
'service_call_ticket_resource',
'service_call_task_resource',
'task_predecessor',
'task'
):
# Assert that there were objects to get deleted, then change
# to zero to verify the output formats correctly.
# We are just testing the command, there are sync tests to
# verify that the synchronizers work correctly
self.assertGreater(pre_full_sync_counts[at_object], 0)
pre_full_sync_counts[at_object] = 0
summary = full_sync_summary(
slug_to_title(at_object),
pre_full_sync_counts[at_object]
)
self.assertIn(summary, output.getvalue().strip())
def _call_service_api(self):
mocks.service_api_get_roles_call(fixtures.API_ROLE)
mocks.service_api_get_departments_call(fixtures.API_DEPARTMENT)
mocks.service_api_get_resource_service_desk_roles_call(
fixtures.API_RESOURCE_SERVICE_DESK_ROLE)
mocks.service_api_get_resource_role_departments_call(
fixtures.API_RESOURCE_ROLE_DEPARTMENT)
mocks.service_api_get_license_types_call(
fixtures.API_LICENSE_TYPE_FIELD)
mocks.service_api_get_use_types_call(fixtures.API_USE_TYPE_FIELD)
mocks.service_api_get_task_type_links_call(
fixtures.API_TASK_TYPE_LINK_FIELD)
mocks.service_api_get_account_types_call(
fixtures.API_ACCOUNT_TYPE_FIELD)
mocks.service_api_get_ticket_category_picklist_call(
fixtures.API_DISPLAY_COLOR_FIELD)
mocks.service_api_get_ticket_picklist_call(
fixtures.API_TICKET_PICKLIST_FIELD)
mocks.service_api_get_project_picklist_call(
fixtures.API_PROJECT_PICKLIST_FIELD)
mocks.service_api_get_service_call_statuses_call(
fixtures.API_SERVICE_CALL_STATUS_FIELD)
mocks.service_api_get_contacts_call(fixtures.API_CONTACT)
mocks.service_api_get_contracts_call(fixtures.API_CONTRACT)
mocks.service_api_get_allocation_codes_call(
fixtures.API_ALLOCATION_CODE)
mocks.service_api_get_account_physical_locations_call(
fixtures.API_ACCOUNT_PHYSICAL_LOCATION)
mocks.service_api_get_ticket_categories_call(
fixtures.API_TICKET_CATEGORY)
mocks.service_api_get_tickets_call(fixtures.API_TICKET)
mocks.service_api_get_tasks_call(fixtures.API_TASK)
mocks.service_api_get_projects_call(fixtures.API_PROJECT)
| |
# -*- coding: utf-8 -*-
#
# Copyright 2010 <NAME>
#
# Distributed under the terms of the MIT license
#
import warnings as _warnings
import os as _os
import ctypes as _ct
from ctypes.util import find_library as _find_library
import numpy as _np
from numpy.ctypeslib import ndpointer as _ndpointer
import pkg_resources
# Locate the shared SOFA C library. Resolution order:
#   1. the SOFA_LIBRARY environment variable, when set (explicit override);
#   2. the copy bundled with the package, when it exists on disk;
#   3. the system search path via ctypes.util.find_library.
# BUG FIX: previously the env-var if/else ran unconditionally, so the
# bundled library path computed here was ALWAYS discarded in favour of
# find_library's result (which may be None even when the bundled copy
# exists). Now the system search is only a fallback.
_sofalib_filename = pkg_resources.resource_filename('pysofa', 'sofa_c.dll')
if not _os.path.isfile(_sofalib_filename):
    _sofalib_filename = None
if _os.environ.get('SOFA_LIBRARY') is not None:
    _sofalib_filename = _os.environ['SOFA_LIBRARY']
elif _sofalib_filename is None:
    _sofalib_filename = _find_library('sofa_c')
if _sofalib_filename is None:
    raise ImportError('Unable to find the shared C library "sofa_c".')
_sofa = _ct.CDLL(_sofalib_filename)
# Try to guess what SOFA version we're dealing with,
# by testing the presence of newly created functions
# between each version.
__sofa_version = None
try:
    _sofa.iauTaitt  # raises AttributeError on releases older than 2010-12-01
    __sofa_version = (2010, 12, 1)
except AttributeError:
    __sofa_version = (2009, 12, 31)
def get_sofa_version():
    """Return the loaded |SOFA| release as a ``(year, month, day)`` tuple.

    When the release number has not been resolved, ``(None, None, None)``
    is returned; that should never happen and shall be signaled as a bug.

    .. note:: this only deals with *major* release numbers and does not
        account for *revised* versions of |SOFA|.
    """
    return (None, None, None) if __sofa_version is None else __sofa_version
def has_function(funcname):
    """Return True when the loaded |SOFA| release provides *funcname*.

    *funcname* must be a name this wrapper knows about; asking for a name
    that no |SOFA| version defines raises :exc:`AttributeError`.

    >>> pysofa.has_function('era00')
    True
    >>> pysofa.has_function('taitt') # if SOFA release < (2010, 12, 1)
    False
    >>> pysofa.has_function('foo')
    Traceback (most recent call last):
      ...
    AttributeError: pysofa does not know any function "named" foo
    """
    if funcname not in globals():
        raise AttributeError('%s does not know any function "named" %s' % \
                                (__name__, funcname))
    # Map the wrapper name onto the SOFA C symbol (e.g. era00 -> iauEra00).
    sofa_symbol = 'iau%s%s' % (funcname[0].upper(), funcname[1:])
    return hasattr(_sofa, sofa_symbol)
def _req_shape_c(a, dtype=None, shape=None, req=None):
    """ Return *a* as an ndarray satisfying *dtype*/*req* requirements,
    reshaped to *shape* in C (row-major) order. """
    arr = _np.require(a, dtype=dtype, requirements=req)
    return arr.reshape(shape, order='C')
# iauA2af
# Declare the argument layout of the C call: the sign and the idmsf
# array are out-parameters filled by SOFA.
_sofa.iauA2af.argtypes = [_ct.c_int, #ndp
                          _ct.c_double, #angle
                          _ct.POINTER(_ct.c_char), #sign
                          _ct.c_int * 4] #idmsf
def a2af(ndp, angle):
    """ Decompose radians into degrees, arcminutes, arcseconds, fraction.
    :param ndp: the requested resolution.
    :type ndp: int
    :param angle: the value to decompose.
    :type angle: float
    :returns: a tuple whose first member is a string containing the sign, and
        the second member is itself a tuple (degrees, arcminutes, arcseconds,
        fraction).
    .. seealso:: |MANUAL| page 19
    """
    sign = _ct.c_char()
    idmsf = (_ct.c_int * 4)()
    _sofa.iauA2af(ndp, float(angle), _ct.byref(sign), idmsf)
    # A ctypes array is directly iterable; no intermediate list needed.
    return sign.value, tuple(idmsf)
# iauA2tf
# Declare the argument layout of the C call: the sign and the ihmsf
# array are out-parameters filled by SOFA.
_sofa.iauA2tf.argtypes = [_ct.c_int, #ndp
                          _ct.c_double, #angle
                          _ct.POINTER(_ct.c_char), #sign
                          _ct.c_int * 4] #ihmsf
def a2tf(ndp, angle):
    """ Decompose radians into hours, arcminutes, arcseconds, fraction.
    :param ndp: the requested resolution.
    :type ndp: int
    :param angle: the value to decompose.
    :type angle: float
    :returns: a tuple whose first member is a string containing the sign, and
        the second member is itself a tuple (hours, arcminutes, arcseconds,
        fraction).
    .. seealso:: |MANUAL| page 20
    """
    sign = _ct.c_char()
    ihmsf = (_ct.c_int * 4)()
    _sofa.iauA2tf(ndp, float(angle), _ct.byref(sign), ihmsf)
    # A ctypes array is directly iterable; no intermediate list needed.
    return sign.value, tuple(ihmsf)
# iauAf2a
# this routine was added in release 2010-12-01 of SOFA
try:
    _sofa.iauAf2a.argtypes = [_ct.c_char, #sign
                              _ct.c_int, #ideg
                              _ct.c_int, #iamin
                              _ct.c_double, #asec
                              _ct.POINTER(_ct.c_double)] #rad
    _sofa.iauAf2a.restype = _ct.c_int
except AttributeError:
    # Older SOFA releases don't export iauAf2a; af2a() below raises
    # NotImplementedError based on the detected version.
    pass
# Warning messages keyed by the iauAf2a return status code.
_af2a_msg = {0: 'OK', # Unused
            1:'Af2a: degrees outside the range 0-359',
            2:'Af2a: arcminutes outside the range 0-59',
            3:'Af2a: arcseconds outside the range 0-59.999...'}
def af2a(s, ideg, iamin, asec):
    """ Convert degrees, arcminutes, arcseconds to radians.
    :param s: sign, '-' for negative, otherwise positive.
    :param ideg: degrees.
    :type ideg: int
    :param iamin: arcminutes.
    :type iamin: int
    :param asec: arcseconds.
    :type asec: float
    :returns: the converted value in radians as a float.
    :raises: :exc:`UserWarning` in case *ideg*, *iamin* or *asec*
        values are outside the range 0-359, 0-59 or 0-59.999...
        :exc:`NotImplementedError` if called with a |SOFA| release prior
        to 2010/12/01.
    .. seealso:: |MANUAL| page 21
    """
    if __sofa_version < (2010, 12, 1):
        raise NotImplementedError
    rad = _ct.c_double()
    # Use a distinct name for the SOFA status code instead of clobbering
    # the *s* sign parameter.
    status = _sofa.iauAf2a(s, ideg, iamin, asec, _ct.byref(rad))
    if status != 0:
        _warnings.warn(_af2a_msg[status], UserWarning, 2)
    return rad.value
# iauAnp
# Scalar in, scalar out: declare both the argument and return types.
_sofa.iauAnp.argtypes = [_ct.c_double]
_sofa.iauAnp.restype = _ct.c_double
def anp(a):
    """ Normalize *a* into the range 0 <= result < 2pi.
    :param a: the value to normalize.
    :type a: float
    :returns: the normalized value as a float.
    .. seealso:: |MANUAL| page 22
    """
    radians = float(a)
    return _sofa.iauAnp(radians)
# iauAnpm
# Scalar in, scalar out: declare both the argument and return types.
_sofa.iauAnpm.argtypes = [_ct.c_double]
_sofa.iauAnpm.restype = _ct.c_double
def anpm(a):
    """ Normalize *a* into the range -pi <= result < +pi.
    :param a: the value to normalize.
    :type a: float
    :returns: the normalized value as a float.
    .. seealso:: |MANUAL| page 23
    """
    radians = float(a)
    return _sofa.iauAnpm(radians)
# iauBi00
# All three arguments are double out-parameters filled by SOFA.
_sofa.iauBi00.argtypes = [_ct.POINTER(_ct.c_double), #dpsibi
                          _ct.POINTER(_ct.c_double), #depsbi
                          _ct.POINTER(_ct.c_double)] #dra
def bi00():
    """ Frame bias components of IAU 2000 precession-nutation models.
    :returns: a tuple of three items:
        * longitude correction (float)
        * obliquity correction (float)
        * the ICRS RA of the J2000.0 mean equinox (float).
    .. seealso:: |MANUAL| page 24
    """
    # Three double out-parameters: dpsibi, depsbi, dra.
    outputs = [_ct.c_double() for _ in range(3)]
    _sofa.iauBi00(*[_ct.byref(v) for v in outputs])
    dpsibi, depsbi, dra = (v.value for v in outputs)
    return dpsibi, depsbi, dra
# iauBp00
# The three 3x3 matrices are C-contiguous float64 out-parameters.
_sofa.iauBp00.argtypes = [_ct.c_double, #date1
                          _ct.c_double, #date2
                          _ndpointer(shape=(3,3), dtype=float, flags='C'), #rb
                          _ndpointer(shape=(3,3), dtype=float, flags='C'), #rp
                          _ndpointer(shape=(3,3), dtype=float, flags='C')] #rbp
def bp00(date1, date2):
    """ Frame bias and precession, IAU 2000.
    :param date1, date2: TT as a two-part Julian date.
    :returns: a tuple of three items:
        * frame bias matrix (numpy.matrix of shape 3x3)
        * precession matrix (numpy.matrix of shape 3x3)
        * bias-precession matrix (numpy.matrix of shape 3x3)
    .. seealso:: |MANUAL| page 25
    """
    # Allocate three zeroed C-contiguous 3x3 output matrices.
    rb, rp, rbp = (_np.asmatrix(_np.zeros(shape=(3, 3), dtype=float,
                                          order='C'))
                   for _ in range(3))
    _sofa.iauBp00(date1, date2, rb, rp, rbp)
    return rb, rp, rbp
# iauBp06
# The three 3x3 matrices are C-contiguous float64 out-parameters.
_sofa.iauBp06.argtypes = [_ct.c_double, #date1
                          _ct.c_double, #date2
                          _ndpointer(shape=(3,3), dtype=float, flags='C'), #rb
                          _ndpointer(shape=(3,3), dtype=float, flags='C'), #rp
                          _ndpointer(shape=(3,3), dtype=float, flags='C')] #rbp
def bp06(date1, date2):
    """ Frame bias and precession, IAU 2006.
    :param date1, date2: TT as a two-part Julian date.
    :returns: a tuple of three items:
        * frame bias matrix (numpy.matrix of shape 3x3)
        * precession matrix (numpy.matrix of shape 3x3)
        * bias-precession matrix (numpy.matrix of shape 3x3)
    .. seealso:: |MANUAL| page 27
    """
    # Allocate three zeroed C-contiguous 3x3 output matrices.
    rb, rp, rbp = (_np.asmatrix(_np.zeros(shape=(3, 3), dtype=float,
                                          order='C'))
                   for _ in range(3))
    _sofa.iauBp06(date1, date2, rb, rp, rbp)
    return rb, rp, rbp
# iauBpn2xy
# Input: 3x3 C-contiguous matrix; x and y are double out-parameters.
_sofa.iauBpn2xy.argtypes = [
                        _ndpointer(shape=(3,3), dtype=float, flags='C'), #rbpn
                        _ct.POINTER(_ct.c_double), #x
                        _ct.POINTER(_ct.c_double)] #y
def bpn2xy(rbpn):
    """ Extract from the bias-precession-nutation matrix the X,Y coordinates
    of the Celestial Intermediate Pole.
    :param rbpn: celestial-to-true matrix
    :type rbpn: numpy.ndarray, matrix or nested sequences of shape 3x3
    :returns: a tuple of two items containing *x* and *y*, as floats.
    .. seealso:: |MANUAL| page 28
    """
    x = _ct.c_double()
    y = _ct.c_double()
    # Pass the outputs through explicit byref(), consistent with the other
    # wrappers in this module (ctypes auto-converts instances, but byref
    # makes the out-parameter intent explicit).
    _sofa.iauBpn2xy(_req_shape_c(rbpn, float, (3, 3)),
                    _ct.byref(x), _ct.byref(y))
    return x.value, y.value
# iauC2i00a
# The 3x3 matrix is a C-contiguous float64 out-parameter.
_sofa.iauC2i00a.argtypes = [_ct.c_double, #date1
                            _ct.c_double, #date2
                            _ndpointer(shape=(3,3), dtype=float, flags='C')] #rc2i
def c2i00a(date1, date2):
    """ Form the celestial-to-intermediate matrix for a given date using the
    IAU 2000A precession-nutation model.
    :param date1, date2: TT as a two-part Julian date.
    :returns: the celestial-to-intermediate matrix, as a numpy.matrix of
        shape 3x3.
    .. seealso:: |MANUAL| page 29
    """
    # Zeroed C-contiguous buffer SOFA fills in place.
    buf = _np.zeros(shape=(3, 3), dtype=float, order='C')
    rc2i = _np.asmatrix(buf)
    _sofa.iauC2i00a(date1, date2, rc2i)
    return rc2i
# iauC2i00b
# The 3x3 matrix is a C-contiguous float64 out-parameter.
_sofa.iauC2i00b.argtypes = [_ct.c_double, #date1
                            _ct.c_double, #date2
                            _ndpointer(shape=(3,3), dtype=float, flags='C')] #rc2i
def c2i00b(date1, date2):
    """ Form the celestial-to-intermediate matrix for a given date using the
    IAU 2000B precession-nutation model.
    :param date1, date2: TT as a two-part Julian date.
    :returns: the celestial-to-intermediate matrix, as a numpy.matrix of
        shape 3x3.
    .. seealso:: |MANUAL| page 31
    """
    # Zeroed C-contiguous buffer SOFA fills in place.
    buf = _np.zeros(shape=(3, 3), dtype=float, order='C')
    rc2i = _np.asmatrix(buf)
    _sofa.iauC2i00b(date1, date2, rc2i)
    return rc2i
# iauC2i06a
# The 3x3 matrix is a C-contiguous float64 out-parameter.
_sofa.iauC2i06a.argtypes = [_ct.c_double, #date1
                            _ct.c_double, #date2
                            _ndpointer(shape=(3,3), dtype=float, flags='C')] #rc2i
def c2i06a(date1, date2):
    """ Form the celestial-to-intermediate matrix for a given date using the
    IAU 2006 precession-nutation model.
    :param date1, date2: TT as a two-part Julian date.
    :returns: the celestial-to-intermediate matrix, as a numpy.matrix of
        shape 3x3.
    .. seealso:: |MANUAL| page 33
    """
    # Zeroed C-contiguous buffer SOFA fills in place.
    buf = _np.zeros(shape=(3, 3), dtype=float, order='C')
    rc2i = _np.asmatrix(buf)
    _sofa.iauC2i06a(date1, date2, rc2i)
    return rc2i
# iauC2ibpn
# rbpn is an input matrix; rc2i is a C-contiguous float64 out-parameter.
_sofa.iauC2ibpn.argtypes = [_ct.c_double, #date1
                            _ct.c_double, #date2
                            _ndpointer(shape=(3,3), dtype=float, flags='C'), #rbpn
                            _ndpointer(shape=(3,3), dtype=float, flags='C')] #rc2i
def c2ibpn(date1, date2, rbpn):
""" Form the celestial-to-intermediate matrix for a given date given | |
# -*- coding: utf-8 -*-
"""
v9s model
* Input: v5_im
Author: Kohei <<EMAIL>>
"""
from logging import getLogger, Formatter, StreamHandler, INFO, FileHandler
from pathlib import Path
import subprocess
import argparse
import math
import glob
import sys
import json
import re
import warnings
import scipy
import tqdm
import click
import tables as tb
import pandas as pd
import numpy as np
from keras.models import Model
from keras.engine.topology import merge as merge_l
from keras.layers import (
Input, Convolution2D, MaxPooling2D, UpSampling2D,
Reshape, core, Dropout,
Activation, BatchNormalization)
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping, History
from keras import backend as K
import skimage.transform
import skimage.morphology
import rasterio.features
import shapely.wkt
import shapely.ops
import shapely.geometry
# Model identity and geometry.
MODEL_NAME = 'v9s'
# Side length of the raw tiles (the masks below are rasterized at 650x650).
ORIGINAL_SIZE = 650
# Side length the network consumes; rasters/masks are resized to this.
INPUT_SIZE = 256
LOGFORMAT = '%(asctime)s %(levelname)s %(message)s'
# Data layout roots.
BASE_DIR = "/data/train"
WORKING_DIR = "/data/working"
# Preprocessed images are shared with the 'v5' pipeline.
IMAGE_DIR = "/data/working/images/{}".format('v5')
MODEL_DIR = "/data/working/models/{}".format(MODEL_NAME)
FN_SOLUTION_CSV = "/data/output/{}.csv".format(MODEL_NAME)
# Parameters
# Polygons smaller than this (in pixels) are discarded.
MIN_POLYGON_AREA = 30
# Input files
FMT_TRAIN_SUMMARY_PATH = str(
    Path(BASE_DIR) /
    Path("{prefix:s}_Train/") /
    Path("summaryData/{prefix:s}_Train_Building_Solutions.csv"))
FMT_TRAIN_RGB_IMAGE_PATH = str(
    Path(BASE_DIR) /
    Path("{prefix:s}_Train/") /
    Path("RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif"))
FMT_TEST_RGB_IMAGE_PATH = str(
    Path(BASE_DIR) /
    Path("{prefix:s}_Test_public/") /
    Path("RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif"))
FMT_TRAIN_MSPEC_IMAGE_PATH = str(
    Path(BASE_DIR) /
    Path("{prefix:s}_Train/") /
    Path("MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif"))
FMT_TEST_MSPEC_IMAGE_PATH = str(
    Path(BASE_DIR) /
    Path("{prefix:s}_Test_public/") /
    Path("MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif"))
# Preprocessing result
FMT_BANDCUT_TH_PATH = IMAGE_DIR + "/bandcut{}.csv"
FMT_MUL_BANDCUT_TH_PATH = IMAGE_DIR + "/mul_bandcut{}.csv"
# Image list, Image container and mask container
FMT_VALTRAIN_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_valtrain_ImageId.csv"
FMT_VALTEST_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_valtest_ImageId.csv"
FMT_VALTRAIN_IM_STORE = IMAGE_DIR + "/valtrain_{}_im.h5"
FMT_VALTEST_IM_STORE = IMAGE_DIR + "/valtest_{}_im.h5"
FMT_VALTRAIN_MASK_STORE = IMAGE_DIR + "/valtrain_{}_mask.h5"
FMT_VALTEST_MASK_STORE = IMAGE_DIR + "/valtest_{}_mask.h5"
FMT_VALTRAIN_MUL_STORE = IMAGE_DIR + "/valtrain_{}_mul.h5"
FMT_VALTEST_MUL_STORE = IMAGE_DIR + "/valtest_{}_mul.h5"
FMT_TRAIN_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_train_ImageId.csv"
FMT_TEST_IMAGELIST_PATH = IMAGE_DIR + "/{prefix:s}_test_ImageId.csv"
FMT_TRAIN_IM_STORE = IMAGE_DIR + "/train_{}_im.h5"
FMT_TEST_IM_STORE = IMAGE_DIR + "/test_{}_im.h5"
FMT_TRAIN_MASK_STORE = IMAGE_DIR + "/train_{}_mask.h5"
FMT_TRAIN_MUL_STORE = IMAGE_DIR + "/train_{}_mul.h5"
FMT_TEST_MUL_STORE = IMAGE_DIR + "/test_{}_mul.h5"
FMT_IMMEAN = IMAGE_DIR + "/{}_immean.h5"
FMT_MULMEAN = IMAGE_DIR + "/{}_mulmean.h5"
# Model files
FMT_VALMODEL_PATH = MODEL_DIR + "/{}_val_weights.h5"
FMT_FULLMODEL_PATH = MODEL_DIR + "/{}_full_weights.h5"
FMT_VALMODEL_HIST = MODEL_DIR + "/{}_val_hist.csv"
FMT_VALMODEL_EVALHIST = MODEL_DIR + "/{}_val_evalhist.csv"
FMT_VALMODEL_EVALTHHIST = MODEL_DIR + "/{}_val_evalhist_th.csv"
# Prediction & polygon result
FMT_TESTPRED_PATH = MODEL_DIR + "/{}_pred.h5"
FMT_VALTESTPRED_PATH = MODEL_DIR + "/{}_eval_pred.h5"
FMT_VALTESTPOLY_PATH = MODEL_DIR + "/{}_eval_poly.csv"
FMT_VALTESTTRUTH_PATH = MODEL_DIR + "/{}_eval_poly_truth.csv"
FMT_VALTESTPOLY_OVALL_PATH = MODEL_DIR + "/eval_poly.csv"
FMT_VALTESTTRUTH_OVALL_PATH = MODEL_DIR + "/eval_poly_truth.csv"
FMT_TESTPOLY_PATH = MODEL_DIR + "/{}_poly.csv"
# Model related files (others)
FMT_VALMODEL_LAST_PATH = MODEL_DIR + "/{}_val_weights_last.h5"
FMT_FULLMODEL_LAST_PATH = MODEL_DIR + "/{}_full_weights_last.h5"
# Logger
warnings.simplefilter("ignore", UserWarning)
handler = StreamHandler()
handler.setLevel(INFO)
handler.setFormatter(Formatter(LOGFORMAT))
fh_handler = FileHandler(".{}.log".format(MODEL_NAME))
fh_handler.setFormatter(Formatter(LOGFORMAT))
logger = getLogger('spacenet2')
logger.setLevel(INFO)
# Only attach handlers when run as a script, so importing this module
# doesn't duplicate log output.
if __name__ == '__main__':
    logger.addHandler(handler)
    logger.addHandler(fh_handler)

# Fix seed for reproducibility
np.random.seed(1145141919)
def directory_name_to_area_id(datapath):
    """
    Map a data directory name to its AOI number.
    Usage:
    >>> directory_name_to_area_id("/data/test/AOI_2_Vegas")
    2
    """
    dir_name = Path(datapath).name
    known_areas = [
        ('AOI_2_Vegas', 2),
        ('AOI_3_Paris', 3),
        ('AOI_4_Shanghai', 4),
        ('AOI_5_Khartoum', 5),
    ]
    for area_prefix, area_id in known_areas:
        if dir_name.startswith(area_prefix):
            return area_id
    raise RuntimeError("Unsupported city id is given.")
def _remove_interiors(line):
    """Drop interior rings from the WKT polygon embedded in a CSV *line*.

    Keeps everything up to the first ring separator '), (' and everything
    after the closing '))",', leaving only the exterior ring.
    """
    if "), (" not in line:
        return line
    exterior_part = line.split('), (')[0]
    trailing_part = line.split('))",')[-1]
    return exterior_part + '))",' + trailing_part
def __load_band_cut_th(band_fn, bandsz=3):
    """Load per-area, per-channel clipping thresholds from *band_fn*.

    The CSV is indexed by ``area_id`` and carries ``chan{i}_min`` /
    ``chan{i}_max`` columns; returns {area_id: {chan: {'min', 'max'}}}.
    """
    df = pd.read_csv(band_fn, index_col='area_id')
    thresholds = {area_id: {} for area_id in range(2, 6)}
    for area_id, row in df.iterrows():
        for chan_i in range(bandsz):
            thresholds[area_id][chan_i] = {
                'min': row['chan{}_min'.format(chan_i)],
                'max': row['chan{}_max'.format(chan_i)],
            }
    return thresholds
def _calc_fscore_per_aoi(area_id):
    """Run the external TopCoder visualizer to score predictions for one AOI.

    Invokes the visualizer JAR on the truth/solution polygon CSVs for
    *area_id* and parses the metrics from its stdout.

    :param area_id: numeric AOI id (2-5).
    :returns: dict with keys overall_fscore, tp, fp, fn, precision,
        recall, fscore.
    :raises RuntimeError: if the visualizer output can't be parsed.
    """
    prefix = area_id_to_prefix(area_id)
    truth_file = FMT_VALTESTTRUTH_PATH.format(prefix)
    poly_file = FMT_VALTESTPOLY_PATH.format(prefix)
    cmd = [
        'java',
        '-jar',
        '/root/visualizer-2.0/visualizer.jar',
        '-truth',
        truth_file,
        '-solution',
        poly_file,
        '-no-gui',
        '-band-triplets',
        '/root/visualizer-2.0/data/band-triplets.txt',
        '-image-dir',
        'pass',
    ]
    proc = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout_data, stderr_data = proc.communicate()
    # Only the last 10 lines of output carry the metrics block parsed below.
    lines = [line for line in stdout_data.decode('utf8').split('\n')[-10:]]
    """
    Overall F-score : 0.85029
    AOI_2_Vegas:
    TP       : 27827
    FP       : 4999
    FN       : 4800
    Precision: 0.847712
    Recall   : 0.852883
    F-score  : 0.85029
    """
    # Degenerate case: a flat-zero score is not followed by the per-AOI block.
    if stdout_data.decode('utf8').strip().endswith("Overall F-score : 0"):
        overall_fscore = 0
        tp = 0
        fp = 0
        fn = 0
        precision = 0
        recall = 0
        fscore = 0
    elif len(lines) > 0 and lines[0].startswith("Overall F-score : "):
        # Sanity-check the expected layout before extracting the numbers;
        # the parsing below is positional and depends on this exact order.
        assert lines[0].startswith("Overall F-score : ")
        assert lines[2].startswith("AOI_")
        assert lines[3].strip().startswith("TP")
        assert lines[4].strip().startswith("FP")
        assert lines[5].strip().startswith("FN")
        assert lines[6].strip().startswith("Precision")
        assert lines[7].strip().startswith("Recall")
        assert lines[8].strip().startswith("F-score")
        overall_fscore = float(re.findall("([\d\.]+)", lines[0])[0])
        tp = int(re.findall("(\d+)", lines[3])[0])
        fp = int(re.findall("(\d+)", lines[4])[0])
        fn = int(re.findall("(\d+)", lines[5])[0])
        precision = float(re.findall("([\d\.]+)", lines[6])[0])
        recall = float(re.findall("([\d\.]+)", lines[7])[0])
        fscore = float(re.findall("([\d\.]+)", lines[8])[0])
    else:
        # NOTE(review): logger.warn is a deprecated alias of logger.warning.
        logger.warn("Unexpected data >>> " + stdout_data.decode('utf8'))
        raise RuntimeError("Unsupported format")
    return {
        'overall_fscore': overall_fscore,
        'tp': tp,
        'fp': fp,
        'fn': fn,
        'precision': precision,
        'recall': recall,
        'fscore': fscore,
    }
def prefix_to_area_id(prefix):
    """Return the numeric AOI id for an AOI directory *prefix*.

    :param prefix: e.g. 'AOI_2_Vegas'.
    :returns: int area id (2-5).
    :raises KeyError: for an unknown prefix.
    """
    area_dict = {
        'AOI_2_Vegas': 2,
        'AOI_3_Paris': 3,
        'AOI_4_Shanghai': 4,
        'AOI_5_Khartoum': 5,
    }
    # BUG FIX: the lookup used the undefined name `area_id` instead of the
    # `prefix` parameter, raising NameError on every call.
    return area_dict[prefix]
def area_id_to_prefix(area_id):
    """Return the AOI directory prefix for a numeric *area_id* (2-5)."""
    city_names = {2: 'Vegas', 3: 'Paris', 4: 'Shanghai', 5: 'Khartoum'}
    return 'AOI_{}_{}'.format(area_id, city_names[area_id])
# ---------------------------------------------------------
# main
def _get_model_parameter(area_id):
    """Pick the epoch and min-area threshold of the best validation row.

    Reads the per-threshold evaluation history CSV and returns the row
    with the highest fscore as dict(fn_epoch=..., min_poly_area=...).
    """
    prefix = area_id_to_prefix(area_id)
    fn_hist = FMT_VALMODEL_EVALTHHIST.format(prefix)
    history = pd.read_csv(fn_hist)
    best_row = history.sort_values(by='fscore', ascending=False).iloc[0]
    return dict(
        fn_epoch=int(best_row['zero_base_epoch']),
        min_poly_area=int(best_row['min_area_th']),
    )
def get_resized_raster_3chan_image(image_id, band_cut_th=None):
    """Read a train RGB raster, clip/normalize each channel, resize.

    Each channel is clipped to its [min, max] threshold from *band_cut_th*
    and scaled to [0, 1]; the result is HWC of side INPUT_SIZE.
    """
    fn = train_image_id_to_path(image_id)
    with rasterio.open(fn, 'r') as f:
        values = f.read().astype(np.float32)
    for chan_i in range(3):
        lo = band_cut_th[chan_i]['min']
        hi = band_cut_th[chan_i]['max']
        clipped = np.clip(values[chan_i], lo, hi)
        values[chan_i] = (clipped - lo) / (hi - lo)
    # (chan, H, W) -> (H, W, chan), same as the two successive swapaxes.
    values = values.transpose((1, 2, 0))
    return skimage.transform.resize(values, (INPUT_SIZE, INPUT_SIZE))
def get_resized_raster_3chan_image_test(image_id, band_cut_th=None):
    """Read a test RGB raster, clip/normalize each channel, resize.

    Identical to get_resized_raster_3chan_image but resolves the path
    through test_image_id_to_path.
    """
    fn = test_image_id_to_path(image_id)
    with rasterio.open(fn, 'r') as f:
        values = f.read().astype(np.float32)
    for chan_i in range(3):
        lo = band_cut_th[chan_i]['min']
        hi = band_cut_th[chan_i]['max']
        clipped = np.clip(values[chan_i], lo, hi)
        values[chan_i] = (clipped - lo) / (hi - lo)
    # (chan, H, W) -> (H, W, chan), same as the two successive swapaxes.
    values = values.transpose((1, 2, 0))
    return skimage.transform.resize(values, (INPUT_SIZE, INPUT_SIZE))
def image_mask_resized_from_summary(df, image_id):
    """Rasterize the building polygons of *image_id* into a binary mask.

    :param df: summary dataframe with ImageId and PolygonWKT_Pix columns.
    :param image_id: id selecting the rows of *df* to rasterize.
    :returns: uint8 array of shape (INPUT_SIZE, INPUT_SIZE), 1 = building.
    """
    # The module top only imports skimage.transform/skimage.morphology;
    # import the draw submodule explicitly so skimage.draw.polygon is
    # guaranteed to be available.
    import skimage.draw
    im_mask = np.zeros((650, 650))
    for idx, row in df[df.ImageId == image_id].iterrows():
        shape_obj = shapely.wkt.loads(row.PolygonWKT_Pix)
        if shape_obj.exterior is not None:
            # Fill the exterior ring with 1s...
            coords = list(shape_obj.exterior.coords)
            x = [round(float(pp[0])) for pp in coords]
            y = [round(float(pp[1])) for pp in coords]
            yy, xx = skimage.draw.polygon(y, x, (650, 650))
            im_mask[yy, xx] = 1
            # ...then carve holes for every interior ring.
            interiors = shape_obj.interiors
            for interior in interiors:
                coords = list(interior.coords)
                x = [round(float(pp[0])) for pp in coords]
                y = [round(float(pp[1])) for pp in coords]
                yy, xx = skimage.draw.polygon(y, x, (650, 650))
                im_mask[yy, xx] = 0
    # Downscale to network resolution and re-binarize.
    im_mask = skimage.transform.resize(im_mask, (INPUT_SIZE, INPUT_SIZE))
    im_mask = (im_mask > 0.5).astype(np.uint8)
    return im_mask
def train_test_image_prep(area_id):
    """Build HDF5 containers of resized RGB images and masks for *area_id*.

    Writes three blosc-compressed PyTables files: train images, test
    images and train masks, one CArray per ImageId.
    """
    prefix = area_id_to_prefix(area_id)
    df_train = pd.read_csv(
        FMT_TRAIN_IMAGELIST_PATH.format(prefix=prefix),
        index_col='ImageId')
    df_test = pd.read_csv(
        FMT_TEST_IMAGELIST_PATH.format(prefix=prefix),
        index_col='ImageId')
    # Per-channel RGB clipping thresholds for this AOI.
    band_cut_th = __load_band_cut_th(
        FMT_BANDCUT_TH_PATH.format(prefix))[area_id]
    df_summary = _load_train_summary_data(area_id)
    # Train images.
    fn = FMT_TRAIN_IM_STORE.format(prefix)
    logger.info("Prepare image container: {}".format(fn))
    with tb.open_file(fn, 'w') as f:
        for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
            im = get_resized_raster_3chan_image(image_id, band_cut_th)
            atom = tb.Atom.from_dtype(im.dtype)
            filters = tb.Filters(complib='blosc', complevel=9)
            ds = f.create_carray(f.root, image_id, atom, im.shape,
                                 filters=filters)
            ds[:] = im
    # Test images.
    fn = FMT_TEST_IM_STORE.format(prefix)
    logger.info("Prepare image container: {}".format(fn))
    with tb.open_file(fn, 'w') as f:
        for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
            im = get_resized_raster_3chan_image_test(image_id, band_cut_th)
            atom = tb.Atom.from_dtype(im.dtype)
            filters = tb.Filters(complib='blosc', complevel=9)
            ds = f.create_carray(f.root, image_id, atom, im.shape,
                                 filters=filters)
            ds[:] = im
    # Train masks rasterized from the building summary.
    fn = FMT_TRAIN_MASK_STORE.format(prefix)
    logger.info("Prepare image container: {}".format(fn))
    with tb.open_file(fn, 'w') as f:
        for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
            im_mask = image_mask_resized_from_summary(df_summary, image_id)
            atom = tb.Atom.from_dtype(im_mask.dtype)
            filters = tb.Filters(complib='blosc', complevel=9)
            ds = f.create_carray(f.root, image_id, atom, im_mask.shape,
                                 filters=filters)
            ds[:] = im_mask
def valtrain_test_image_prep(area_id):
    """Build HDF5 containers for the validation split of *area_id*.

    Same layout as train_test_image_prep, but over the valtrain/valtest
    image lists and writing masks for both splits (four files total).
    """
    prefix = area_id_to_prefix(area_id)
    logger.info("valtrain_test_image_prep for {}".format(prefix))
    df_train = pd.read_csv(
        FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix),
        index_col='ImageId')
    df_test = pd.read_csv(
        FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix),
        index_col='ImageId')
    # Per-channel RGB clipping thresholds for this AOI.
    band_cut_th = __load_band_cut_th(
        FMT_BANDCUT_TH_PATH.format(prefix))[area_id]
    df_summary = _load_train_summary_data(area_id)
    # Valtrain images.
    fn = FMT_VALTRAIN_IM_STORE.format(prefix)
    logger.info("Prepare image container: {}".format(fn))
    with tb.open_file(fn, 'w') as f:
        for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
            im = get_resized_raster_3chan_image(image_id, band_cut_th)
            atom = tb.Atom.from_dtype(im.dtype)
            filters = tb.Filters(complib='blosc', complevel=9)
            ds = f.create_carray(f.root, image_id, atom, im.shape,
                                 filters=filters)
            ds[:] = im
    # Valtest images (still drawn from the training area, so the same
    # path resolver is used).
    fn = FMT_VALTEST_IM_STORE.format(prefix)
    logger.info("Prepare image container: {}".format(fn))
    with tb.open_file(fn, 'w') as f:
        for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
            im = get_resized_raster_3chan_image(image_id, band_cut_th)
            atom = tb.Atom.from_dtype(im.dtype)
            filters = tb.Filters(complib='blosc', complevel=9)
            ds = f.create_carray(f.root, image_id, atom, im.shape,
                                 filters=filters)
            ds[:] = im
    # Valtrain masks.
    fn = FMT_VALTRAIN_MASK_STORE.format(prefix)
    logger.info("Prepare image container: {}".format(fn))
    with tb.open_file(fn, 'w') as f:
        for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
            im_mask = image_mask_resized_from_summary(df_summary, image_id)
            atom = tb.Atom.from_dtype(im_mask.dtype)
            filters = tb.Filters(complib='blosc', complevel=9)
            ds = f.create_carray(f.root, image_id, atom, im_mask.shape,
                                 filters=filters)
            ds[:] = im_mask
    # Valtest masks.
    fn = FMT_VALTEST_MASK_STORE.format(prefix)
    logger.info("Prepare image container: {}".format(fn))
    with tb.open_file(fn, 'w') as f:
        for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
            im_mask = image_mask_resized_from_summary(df_summary, image_id)
            atom = tb.Atom.from_dtype(im_mask.dtype)
            filters = tb.Filters(complib='blosc', complevel=9)
            ds = f.create_carray(f.root, image_id, atom, im_mask.shape,
                                 filters=filters)
            ds[:] = im_mask
def train_test_mul_image_prep(area_id):
    """Build HDF5 containers of 8-channel multispectral images for *area_id*.

    Writes blosc-compressed PyTables files for the train and test image
    lists, one CArray per ImageId.
    """
    prefix = area_id_to_prefix(area_id)
    df_train = pd.read_csv(
        FMT_TRAIN_IMAGELIST_PATH.format(prefix=prefix),
        index_col='ImageId')
    df_test = pd.read_csv(
        FMT_TEST_IMAGELIST_PATH.format(prefix=prefix),
        index_col='ImageId')
    # Clipping thresholds for the 3 RGB bands and the 8 multispectral bands.
    band_rgb_th = __load_band_cut_th(
        FMT_BANDCUT_TH_PATH.format(prefix))[area_id]
    band_mul_th = __load_band_cut_th(
        FMT_MUL_BANDCUT_TH_PATH.format(prefix), bandsz=8)[area_id]
    # NOTE(review): df_summary is loaded but never used in this function —
    # confirm whether the summary read is needed here.
    df_summary = _load_train_summary_data(area_id)
    # Train multispectral images.
    fn = FMT_TRAIN_MUL_STORE.format(prefix)
    logger.info("Prepare image container: {}".format(fn))
    with tb.open_file(fn, 'w') as f:
        for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
            im = get_resized_raster_8chan_image(
                image_id, band_rgb_th, band_mul_th)
            atom = tb.Atom.from_dtype(im.dtype)
            filters = tb.Filters(complib='blosc', complevel=9)
            ds = f.create_carray(f.root, image_id, atom, im.shape,
                                 filters=filters)
            ds[:] = im
    # Test multispectral images.
    fn = FMT_TEST_MUL_STORE.format(prefix)
    logger.info("Prepare image container: {}".format(fn))
    with tb.open_file(fn, 'w') as f:
        for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
            im = get_resized_raster_8chan_image_test(
                image_id, band_rgb_th, band_mul_th)
            atom = tb.Atom.from_dtype(im.dtype)
            filters = tb.Filters(complib='blosc', complevel=9)
            ds = f.create_carray(f.root, image_id, atom, im.shape,
                                 filters=filters)
            ds[:] = im
def valtrain_test_mul_image_prep(area_id):
prefix = area_id_to_prefix(area_id)
logger.info("valtrain_test_image_prep for {}".format(prefix))
df_train = pd.read_csv(
FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
df_test = pd.read_csv(
FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix),
index_col='ImageId')
band_rgb_th = __load_band_cut_th(
FMT_BANDCUT_TH_PATH.format(prefix))[area_id]
band_mul_th = __load_band_cut_th(
FMT_MUL_BANDCUT_TH_PATH.format(prefix), bandsz=8)[area_id]
df_summary = _load_train_summary_data(area_id)
fn | |
0.0452150644285857, -0.0220226812226592,
-0.0556777449240322, -0.106839588557335, -0.124419677875331,
-0.15802574281452, -0.0722707700506129, -0.105116727222253,
-0.693738296346539, 0.124279255191775, 0.560069134869483,
0.6841791124165, -0.107031678165138, -0.0412583559665033,
0.00896442684349225, 0.0844786906931047, 0.113582954198454,
0.0990000691857173, 0.100538046096834, 0.115895712613785,
0.0397115077690524, 0.074237842466752, 0.00737613155330012,
0.150345634380864, 0.0566752746370431, -0.0212778570711782,
-0.0563304001336339, -0.13372261621447, -0.171318995103557,
-0.141228583028678, -0.192417498343195, -0.152318039573587,
0.404255923420743, -0.304552854849217, -0.154206546410626,
-0.107031678165138, 11.9670976422831, -3.61736029523771,
1.34326036379293, -0.494153536158182, 0.0834074623603979,
-0.226433455900482, -0.192957457020386, -0.209244998351617,
-0.379519786733706, -0.389683690480388, -0.230781621465011,
-0.316949554505238, -0.0794032910847186, 0.0892123902485899,
0.134636493675703, 0.25972163924313, 0.34046358953534,
0.359567685263677, 0.241143867637169, 0.283588971081093,
0.152502921378066, -0.0916575260440732, -0.0936367010188792,
-0.0412583559665033, -3.61736029523771, 2.970388896856,
-0.526929630639825, 0.882729262382722, 0.311472631824992,
0.378478323406872, 0.140426378273093, 0.456737474558485,
0.0613966244883271, -0.122964335843988, -0.0922074692604644,
-0.128708029908938, -0.124621699881393, -0.0591566177055406,
0.0184560491961697, 0.113879979649421, 0.164485847186027,
0.18744932016437, 0.127614304692519, 0.147051420502851,
0.00918450889439832, 0.0065056153509031, -0.0316633843634197,
0.00896442684349225, 1.34326036379293, -0.526929630639825,
1.81242957779768, 0.322362109775063, 1.0323933349731,
0.457360886283924, 0.600444054321793, 0.91370612119665,
0.434301465723446, 0.088055761039491, 0.0848436101740658,
0.12744269016312, -0.0422129416844794, -0.135413959815355,
-0.118116683956611, -0.140107289290965, -0.104129953113642,
-0.0176599234366945, -0.0234972191406761, -0.00396736798305567,
-0.209302670605415, 0.150496284941485, 0.0722647584400985,
0.0844786906931047, -0.494153536158182, 0.882729262382722,
0.322362109775063, 1.27384764314234, 0.436524282156763,
0.700073089805124, 0.42086785868429, 0.839439528040047,
0.457304590724931, 0.226988664621102, 0.0985281278677041,
0.149735868306737, 0.00757050756523659, -0.0912854085559435,
-0.113724893986152, -0.155881623417337, -0.140317119716309,
-0.0713406095842423, -0.0716633370687491, -0.0803273684693989,
-0.240267712628183, 0.162046172228289, 0.0888778951412027,
0.113582954198454, 0.0834074623603979, 0.311472631824992,
1.0323933349731, 0.436524282156763, 1.53314803627766, 0.25417552361178,
0.924685362467373, 1.02972297718702, 0.744949808908217,
0.233388656964272, 0.217427541094708, 0.336055700676656,
0.03743876914053, -0.176903580046802, -0.226786128357071,
-0.334988155952527, -0.319776388666983, -0.219028256501927,
-0.17608697725841, -0.170430448469263, -0.262649114511732,
0.162469531644191, 0.136117294041357, 0.0990000691857173,
-0.226433455900482, 0.378478323406872, 0.457360886283924,
0.700073089805124, 0.25417552361178, 1.5644915898195,
-0.0975959025442556, 0.97584002950171, 0.168966508584049,
0.522744745835193, 0.0695516714823915, 0.233037922998919,
0.0395396435819605, -0.095915399510004, -0.131051837524492,
-0.237143604722523, -0.301242506702221, -0.0532025767899828,
-0.0391505765515488, -0.0655180027477513, -0.365619814022882,
0.286540735503703, 0.119855730051766, 0.100538046096834,
-0.192957457020386, 0.140426378273093, 0.600444054321793,
0.42086785868429, 0.924685362467373, -0.0975959025442556,
1.75709614368116, 0.519892887281196, 1.34059888676236,
0.0494466626673817, 0.484653667289698, 0.4084580005862,
0.114791375213036, -0.095987442819961, -0.154931741342113,
-0.33903979770467, -0.45909753833617, -0.453485705029258,
-0.522724292505745, -0.433667169565367, -0.327688080888645,
0.191866647181026, 0.191533981589997, 0.115895712613785,
-0.209244998351617, 0.456737474558485, 0.91370612119665,
0.839439528040047, 1.02972297718702, 0.97584002950171,
0.519892887281196, 2.13968207395045, -0.336164257008457,
1.46238357432731, 0.283248342097559, 0.418961909633758,
0.100598943813762, -0.113838923311929, -0.163977417541771,
-0.348791715965874, -0.506114865794541, -0.545497271000249,
-0.0923819541519448, -0.0813491527908169, -0.677140166341306,
0.613174101669279, 0.18839422518101, 0.0397115077690524,
-0.379519786733706, 0.0613966244883271, 0.434301465723446,
0.457304590724931, 0.744949808908217, 0.168966508584049,
1.34059888676236, -0.336164257008457, 4.35482101642386,
-2.21992012490562, 0.9807810786271, 0.462108282803803,
0.102236590809152, -0.17410468003079, -0.266734242517673,
-0.42033089744795, -0.347661934619728, -0.575867027882068,
-0.498450427127264, -0.934477998886557, -0.482724236080649,
0.400604659764261, 0.161358399661349, 0.074237842466752,
-0.389683690480388, -0.122964335843988, 0.088055761039491,
0.226988664621102, 0.233388656964272, 0.522744745835193,
0.0494466626673817, 1.46238357432731, -2.21992012490562,
5.99371147921815, -1.04859890355395, 0.398421384908712,
0.156461542733891, -0.019434665724302, -0.0755469444958039,
-0.280486183344505, -0.58136563218615, -0.484634147345514,
-0.356292120764518, -0.321958833095722, -0.237502268332879,
0.188505316042605, 0.111490691845143, 0.00737613155330012,
-0.230781621465011, -0.0922074692604644, 0.0848436101740658,
0.0985281278677041, 0.217427541094708, 0.0695516714823915,
0.484653667289698, 0.283248342097559, 0.9807810786271,
-1.04859890355395, 2.48580346896528, 0.24501910446873,
0.0969773944957306, -0.0119475871587249, -0.0484689252547535,
-0.149760547646666, -0.199911474519641, -0.552166680756775,
-0.678892603460011, -0.5732539695505, -0.394504397805591,
0.296392591782633, 0.122188815638708, 0.150345634380864,
-0.316949554505238, -0.128708029908938, 0.12744269016312,
0.149735868306737, 0.336055700676656, 0.233037922998919,
0.4084580005862, 0.418961909633758, 0.462108282803803,
0.398421384908712, 0.24501910446873, 0.498918611844292,
0.101227030650827, -0.178108615875722, -0.244469114785688,
-0.433333724585524, -0.51389265638907, -0.438018959207985,
-0.33953374106696, -0.343184385441028, -0.124407237788995,
0.0853113158261075, 0.0452150644285857, 0.0566752746370431,
-0.0794032910847186, -0.124621699881393, -0.0422129416844794,
0.00757050756523659, 0.03743876914053, 0.0395396435819605,
0.114791375213036, 0.100598943813762, 0.102236590809152,
0.156461542733891, 0.0969773944957306, 0.101227030650827,
0.11580938727038, 0.0398795020188062, -0.0127386826369836,
-0.078440365893966, -0.126321924455581, -0.142568428815273,
-0.128344286246162, -0.12956213066101, 0.0889824986249079,
-0.0756702387932951, -0.0220226812226592, -0.0212778570711782,
0.0892123902485899, -0.0591566177055406, -0.135413959815355,
-0.0912854085559435, -0.176903580046802, -0.095915399510004,
-0.095987442819961, -0.113838923311929, -0.17410468003079,
-0.019434665724302, -0.0119475871587249, -0.178108615875722,
0.0398795020188062, 0.193099358293502, 0.160470436488742,
0.175008323309716, 0.136917954502474, 0.0674161405311557,
0.0279662107609871, 0.0326656898702012, 0.166767589068027,
-0.125698012402281, -0.0556777449240322, -0.0563304001336339,
0.134636493675703, 0.0184560491961697, -0.118116683956611,
-0.113724893986152, -0.226786128357071, -0.131051837524492,
-0.154931741342113, -0.163977417541771, -0.266734242517673,
-0.0755469444958039, -0.0484689252547535, -0.244469114785688,
-0.0127386826369836, 0.160470436488742, 0.226474480900493,
0.241440219382646, 0.201381320129549, 0.122080977992512,
0.081142778241122, 0.0898255256880958, 0.334266702799051,
-0.246372406481652, -0.106839588557335, -0.13372261621447,
0.25972163924313, 0.113879979649421, -0.140107289290965,
-0.155881623417337, -0.334988155952527, -0.237143604722523,
-0.33903979770467, -0.348791715965874, -0.42033089744795,
-0.280486183344505, -0.149760547646666, -0.433333724585524,
-0.078440365893966, 0.175008323309716, 0.241440219382646,
0.46452011571498, 0.431899381842666, 0.29910894777825,
0.240131184980999, 0.241496481383994, 0.407758566331903,
-0.300946575673317, -0.124419677875331, -0.171318995103557,
0.34046358953534, 0.164485847186027, -0.104129953113642,
-0.140317119716309, -0.319776388666983, -0.301242506702221,
-0.45909753833617, -0.506114865794541, -0.347661934619728,
-0.58136563218615, -0.199911474519641, -0.51389265638907,
-0.126321924455581, 0.136917954502474, 0.201381320129549,
0.431899381842666, 0.744405673993396, 0.24778593600495,
0.4134406891463, 0.339886403782703, 0.47826420385775,
-0.369984827436232, -0.15802574281452, -0.141228583028678,
0.359567685263677, 0.18744932016437, -0.0176599234366945,
-0.0713406095842423, -0.219028256501927, -0.0532025767899828,
-0.453485705029258, -0.545497271000249, -0.575867027882068,
-0.484634147345514, -0.552166680756775, -0.438018959207985,
-0.142568428815273, 0.0674161405311557, 0.122080977992512,
0.29910894777825, 0.24778593600495, 1.6826091216262,
-0.575046038411989, 0.249267438526421, 0.385179621333107,
-0.300763838199813, -0.0722707700506129, -0.192417498343195,
0.241143867637169, 0.127614304692519, -0.0234972191406761,
-0.0716633370687491, -0.17608697725841, -0.0391505765515488,
-0.522724292505745, -0.0923819541519448, -0.498450427127264,
-0.356292120764518, -0.678892603460011, -0.33953374106696,
-0.128344286246162, 0.0279662107609871, 0.081142778241122,
0.240131184980999, 0.4134406891463, -0.575046038411989,
4.04150194703527, 0.746794237585425, 0.448761523202594,
-0.36676854858092, -0.105116727222253, -0.152318039573587,
0.283588971081093, 0.147051420502851, -0.00396736798305567,
-0.0803273684693989, -0.170430448469263, -0.0655180027477513,
-0.433667169565367, -0.0813491527908169, -0.934477998886557,
-0.321958833095722, -0.5732539695505, -0.343184385441028,
-0.12956213066101, 0.0326656898702012, 0.0898255256880958,
0.241496481383994, 0.339886403782703, 0.249267438526421,
0.746794237585425, 3.59868564812178
]).reshape(24, 24, order='F')
mpg_bs.rV = np.array([
-0.0297518795017788, -0.0279144173114223, -0.0184650622756606,
-0.0101929107895547, -5.5865624793419e-05, -0.000249849893142171,
-0.000309898734480696, -5.42025622874735e-06, -2.22599252448511e-05,
6.60370601160911e-05, 4.41786063148996e-05, 0.000264079369629404,
0.000312739927356773, 0.000166746800984281, 6.22512704032769e-05,
0.00023873784102746, -0.000189663494086306, -0.000244118781964904,
-5.82504534029272e-05, 0.000142876370789319, 0.000217254008213435,
4.53282887363646e-05, 1.82517606339378e-05, 8.58792421331435e-06,
0.000370322487453743, 0.000201992910699855, -0.000469164613014575,
0.000774270041235595, -5.68115342516726e-05, -0.000380679224700556,
-0.000422010861587262, 0.000101142157681932, 0.000187113782707615,
0.000120435765907176, 0.000158193260135929, 0.000318437252371348,
0.000133569503239669, 5.14508949733296e-05, 3.72708649769547e-05,
0.0481141951754968, -0.0112467428647124, 0.0133577786074344,
0.00971188595873735, 0.0144420339639255, 0.0153033177968391,
0.006359918898083, 0.00184703219892869, 0.000682133802387619,
-0.000618858940479052, -0.00012511342698143, 0.00231285339396231,
-0.00300932642921946, 0.000227034785457444, 0.00126423894576831,
0.00172845539987259, -1.90970302574768e-05, -0.000518679088367633,
-0.000314832607266587, -0.000362678746564177, -0.00132787136055019,
-0.000746870682613774, -0.000241398551814824, -0.000143617623861035,
-0.00996822090390736, -0.0382661384394529, 0.0513779162167389,
-0.0396501876325656, -0.00490503889374995, -0.00956222014702774,
-0.00419055937855679, -0.00120170608842117, -0.000442360718664101,
0.00823791090945297, 0.00500218745429801, -0.0517063795773348,
0.0587814382937658, -0.00240356824834935, -0.0116880128887736,
-0.015836689892777, -0.00705290752700762, -0.0039889034038224,
0.00281650986526543, 0.00772245566351641, 0.0200045321896821,
0.00964527245902078, 0.00407247631168687, 0.00331966549593053,
-0.00966284279156194, -0.0244550895551929, -0.0152784731227065,
-0.00892356302821995, 0.00598614795507797, 0.0158743389962298,
0.00817417514516272, 0.00287736815670271, 0.00110289732817891,
-0.00103701249016703, -0.000122137810058872, 0.00491339128850968,
-0.00573539965315568, -0.000796427359226435, -0.00388683923076641,
-0.00364002687200746, 0.00340316885739129, 0.00523232146797231,
0.000284396807821315, -0.0011396146450857, -0.0013542090929267,
-0.000300937391353895, -0.000115763147393071, -0.000113504718932038,
-0.0103409915589158, -0.0589537326887811, 0.000653338095905729,
0.0678869795656213, -0.0576789285604959, 0.00122346111634506,
-0.00217093925795169, -0.000950153161210469, -0.000423266456563166,
0.000873934414598948, 0.00266494641426755, -0.0160583032713915,
0.0167861190077875, 0.00321515210369582, 0.0158650063077605,
0.0144562648728826, -0.013440701985568, -0.0245813860385962,
-0.00906420988685205, 0.00147317779331179, 0.0113275107771079,
0.00436237793605965, 0.00281967623170031, 0.00331841030626531,
0.00926071280185207, 0.0652218789424438, 0.0488676855220674,
-0.0072693899726496, -0.0844631836304519, 0.0520383960295573,
0.0176119917063159, 0.0050650149241255, 0.00173077764428115,
-0.0178422395520497, 0.0122414128180879, 0.0530935081682878,
-0.0700498524474214, -0.00205958513348335, -0.0151068167123392,
-0.0306537850616411, -0.0330147097107778, -0.0291440083012776,
0.0131315762801279, 0.0479834543078199, 0.0494812459172924,
-0.00647839147888454, -0.00707071034488417, -0.00646049648120873,
-0.00466121288355426, -0.0485035049559369, -0.0749934388385525,
-0.0682272537597176, -0.0210539362448509, 0.103013289479923,
0.0200363284031645, -0.00191733491201945, -0.00488933018302833,
0.0021827419970009, 0.011596465584245, -0.0155626706843001,
-0.00907106241734132, -0.0122285794757302, -0.0645401411913822,
-0.0840936295641952, 0.0442913399580051, 0.142338108122544,
0.0765593704205363, 0.0182446328667233, -0.0695935839925863,
-0.0356318919794637, -0.00586377697109934, -0.000387720862907859,
-0.0219182358925276, 0.0350060947556832, 0.035455153691358,
-0.0031346913093081, -0.00606431483213071, 0.0744278437298835,
-0.0182229735631128, -0.00495131827816417, -0.00206650282892026,
-0.0969804831618052, 0.180887878378089, -0.0859863312102999,
-0.0609843554208379, 0.00613665590152679, 0.0296030378774339,
0.0222065834111922, -0.0578738162737623, -0.00993826601192342,
0.0247694369149481, 0.0422289709919424, 0.00870315910426681,
-0.0486402858772571, -0.0339176010353395, -0.00725340819016587,
-0.0277893072125588, 0.00738941478298226, 0.0360521314155386,
0.0453211474077115, 0.0414500649801317, -0.0160358299801778,
0.024230734733359, 0.0112559983037238, 0.00198150289794027,
0.0109534489965432, -0.0247890865548996, -0.0123156930059525,
0.0511677755860601, 0.0152213730389165, 0.0828149195741336,
0.097348221323021, -0.0623669711182891, -0.039401135913146,
0.117141213376684, 0.0779973249031316, -0.167651800783608,
-0.0541136339904111, 0.013955651874827, 0.0203115237423255,
0.0300361102737562, -0.0341507246067295, -0.0570717986255995,
-0.0329428638477796, -0.0291234190210299, 0.0135950582743237,
-0.0694590225085228, -0.00876695808804125, -0.00155150163487863,
-0.0415052621951564, 0.0855413818045413, -0.0567489326054377,
-0.0124458922312971, -0.00849044899817017, -0.020202207151624,
0.0791647388775217, 0.184676364774685, -0.00395852520670578,
-0.11284909647847, -0.0836319344577794, -0.0813903370310093,
-0.0387960849097079, 0.00490532414058196, 0.0302521473022626,
0.0308252615863213, -0.0365395275622032, -0.0797417067400042,
-0.074085098207374, -0.0482177700552885, 0.0419597883633913,
-0.00238151885267258, 0.00195645701970412, 0.00191752776718316,
0.0220696134622902, -0.032189920392082, 0.00758470649527134,
0.00885780502368493, -0.0328712034530519, -0.116584971692872,
0.0126625346626522, 0.18030014524035, -0.174387401016797,
0.0280216194648148, 0.206880690893945, 0.0111317590905542,
-0.0604125476723619, -0.0402701113502636, -0.0411940611934523,
-0.0292431192114974, 0.0287905486343225, 0.0603522262221798,
0.05380622472953, 0.0381256769879428, -0.00877866079967804,
-0.0114198930646073, 0.00290846811747617, -0.00936224527565544,
-0.00280843716449172, -0.00498629441063645, 0.0179721340474412,
-0.00861873414462067, 0.00565208536719023, -0.0144866624993919,
-0.0744953444072861, 0.00894509365512418, 0.0188715647674098,
0.0196999853322494, 0.0901680207441546, -0.0749065688345394,
-0.00838753112833702, 0.0531697564511473, 0.136959461408073,
0.043435117577396, -0.000485715784516, -0.0344794393341381,
-0.0460327568324399, -0.0675385675778985, -0.146473249882646,
0.278372094764221, 0.120489449499735, 0.0188465131812548,
0.0715025240762291, 0.00799416409681597, -0.105021738754416,
-0.0426408431374772, 0.0331758188834319, 0.0290793232106494,
-0.152422969531859, -0.00411226278463431, 0.0169944032769387,
-0.0129986174021856, 0.0347047561592154, 0.0623646729345188,
-0.118206811803022, -0.140694522708572, -0.140683762744576,
0.158094209672232, 0.0203620052526992, -0.0727348020063609,
-0.0884287943888251, -0.151955239271556, -0.178956415657029,
-0.099476919504814, -0.0320494342177646, -0.0236748823993347,
-0.154971928176839, -0.0330459295223386, 0.173731829954425,
0.217169238800757, 0.054629549842133, 0.164557995022997,
-0.0388578143313742, -0.00960551120542888, 0.147879602189961,
-0.272183551848361, 0.176820407653075, 0.0079050431715667,
-0.183982203945459, -0.0775309023980952, -0.0135909620051159,
-0.033800534622957, -0.00767215662608992, 0.0139341286859032,
0.0215587946038859, 0.0361091178670791, 0.0433219068631611,
-0.000962112014663084, 0.0162979158854552, -0.0476608330744487,
0.0628596343632427, 0.0333630844392246, -0.0844837951002203,
-0.114740877395659, 0.150283648331406, 0.280085162961297,
-0.282164998837566, 0.124839802199115, -0.0933873943147368,
-0.0824336720662143, 0.102655758878417, -0.0583274276749742,
0.142228337313126, 0.171933875689057, 0.135946640527651,
-0.0271990653138554, 0.0020746672622744, 0.0280363329077379,
0.0361726682845661, 0.0480894942387795, 0.0514325292442759,
-0.140942452660974, -0.0230057637890896, 0.0922577554817711,
-0.262236828942001, 0.0740944233948668, 0.197601525405761,
0.207334066529231, 0.0946452985476689, 0.118327385420593,
-0.186208569630427, 0.172171406366479, -0.168836309107086,
0.296396954609535, -0.228737880107238, 0.0103931906647384,
0.0340260435380178, -0.0826979747389254, -0.105218425287459,
0.0115530411165066, 0.0021601328799856, -0.00794017743252516,
-0.0160157183232517, -0.0217791382952013, -0.0210064613984729,
0.0604954298804515, -0.0134611171569919, 0.0107077476297973,
0.0192096848992328, -0.0359539928007615, 0.0184131758362472,
0.00718334337401242, -0.00513274287181537, -0.0395478786695696,
-0.0338833714451987, -0.0101066402795655, -0.0702469691868337,
0.0899019452166869, -0.130857170784601, 0.194797566536871,
-0.293985247792722, -0.127956937593321, 0.538381198123075,
0.0132062121841276, 0.0166577109173017, 0.0204570452515256,
0.0210302553780223, 0.00948829280246407, -0.00259466527516782,
-0.169050776708552, 0.0176449742622338, -0.0277900221053683,
-0.177046985121443, 0.0823278124705666, 0.111508059634237,
0.0974492057959729, -0.0803727412741088, -0.256237431105662,
-0.0589818545148105, -0.137232151521597, -0.0356995098286272,
-0.101362799946221, 0.0644887385614398, -0.0672851177363907,
0.0652912510995767, 0.0426358861206711, 0.0600691466471638,
0.0852565075962193, 0.0328913153615828, -0.0134924524347108,
-0.0339518938589891, -0.074055543117041, -0.0878817068993808,
-0.164392063467643, -0.118882970850557, 0.567291815602786,
0.147055341317559, -0.0948604513437437, -0.0732028485321263,
-0.0481401820760952, 0.0538839024444461, 0.241400054042709,
0.16755779144261, 0.159482918490435, 0.14059723990491,
0.122190788139167, 0.0330484822016492, 0.155417089643274,
-0.123967330958311, -0.133730317847849, 0.0123153144117149,
-0.0572155235331402, -0.0322422402398889, -0.00516798160954242,
0.0112436179619514, 0.0389807303731987, 0.0330850630219065,
0.327209565123676, -0.530425233903229, 0.32825889338139,
-0.0476988900717266, 0.0370773036879646, 0.00647572364840762,
0.0274282286286442, -0.0482426726846349, -0.211521604729665,
-0.264316078930412, -0.218782113040786, -0.251492880093616,
-0.193646739359928, -0.0968005854286071, -0.305314578727092,
-0.0974925243314355, 0.121045535316297, 0.072269403918501,
0.0019236279382869, 0.0268726912598391, 0.0370657911406326,
0.0297657267836094, 0.017596390730566, -0.021527152312842,
0.147079124119788, -0.632808359857734, -0.380412144540749,
-0.277096815862218, 0.191496518567932, 0.110751871116004,
0.104890944779502, 0.151215885925301, -0.0937289078155581,
0.274319394407329, 0.198603116965817, 0.329834669046153,
0.227801827008104, 0.33668362438987, 0.454974005598239,
0.229550331629555, 0.607282526089242, 0.149507241400591,
0.229720790556087, 0.0621158786424302, -0.0595549536705348,
-0.0938159474177122, -0.191483104083124, -0.252967102522184,
-0.219249991152887, -0.339524784291672, -0.353176242378271,
0.0813353348970341, -0.0654587134463156, -0.0246701130782029,
-0.0207301123061629, -0.0315039164064682, 0.0212813111780791,
-0.0768046947818802, -0.0393295302908703, -0.116127290200662,
0.0435247410072016, -0.245414122054282, 0.158908303150518,
-0.816734417627416, 0.881877093561075, -0.364638309241683,
-0.056786256722417, -0.00968527020423421, 0.0265910927732837,
0.0380557354124225, 0.053493718904318, 0.0242576056241561,
0.0557321652408447, 0.162137334893874, 0.217365341750675,
0.0816311056865962, -0.0596536181056574, -0.0310250346933758,
-0.0258598033359973, 1.57157757262447, -0.551846529931971,
0.178630089646898, -0.121940697227273, -0.025984796272873,
-0.0676047713589873, -0.0647127382787066, -0.0818438736871805,
-0.0939009139216642, -0.0902348993636832, -0.0508630584433755,
-0.0609219372551081, -0.013009064200119, 0.0192798284833754,
0.0264362938415028, 0.0497024606324435, 0.0643121575768081,
0.0663163083142627, 0.0640123302445087, 0.0750880869238639
]).reshape(24, 24, order='F')
mpg_bs.gcv_ubre = 5.16162045425616
mpg_bs.aic = 909.585145453506
mpg_bs.rank = 24
mpg_bs.gcv_ubre_dev = 5.16162045425363
mpg_bs.method = 'GCV'
mpg_bs.cmX = np.array([
1, 0.901477832512315, 0.58128078817734, 0.374384236453202, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
])
mpg_bs.assign = np.array([
0, 1, 2, 2
])
mpg_bs.offset = np.array([
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | |
:py:class:`ActionOpcode <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.ActionOpcode>`
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
super(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.MarkOnly.MarkDetail, self).__init__()
self.yang_name = "mark-detail"
self.yang_parent_name = "mark-only"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('mark_value', YLeaf(YType.uint8, 'mark-value')),
('action_opcode', YLeaf(YType.enumeration, 'action-opcode')),
])
self.mark_value = None
self.action_opcode = None
self._segment_path = lambda: "mark-detail"
def __setattr__(self, name, value):
self._perform_setattr(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.MarkOnly.MarkDetail, ['mark_value', 'action_opcode'], name, value)
class PoliceConform(Entity):
"""
Police conform mark
.. attribute:: action_type
Action type
**type**\: :py:class:`Action <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.Action>`
.. attribute:: mark_detail
Mark value
**type**\: list of :py:class:`MarkDetail <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.PoliceConform.MarkDetail>`
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
super(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.PoliceConform, self).__init__()
self.yang_name = "police-conform"
self.yang_parent_name = "marking"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("mark-detail", ("mark_detail", PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.PoliceConform.MarkDetail))])
self._leafs = OrderedDict([
('action_type', YLeaf(YType.enumeration, 'action-type')),
])
self.action_type = None
self.mark_detail = YList(self)
self._segment_path = lambda: "police-conform"
def __setattr__(self, name, value):
self._perform_setattr(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.PoliceConform, ['action_type'], name, value)
class MarkDetail(Entity):
"""
Mark value
.. attribute:: mark_value
Mark value
**type**\: int
**range:** 0..255
.. attribute:: action_opcode
Action opcode
**type**\: :py:class:`ActionOpcode <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.ActionOpcode>`
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
super(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.PoliceConform.MarkDetail, self).__init__()
self.yang_name = "mark-detail"
self.yang_parent_name = "police-conform"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('mark_value', YLeaf(YType.uint8, 'mark-value')),
('action_opcode', YLeaf(YType.enumeration, 'action-opcode')),
])
self.mark_value = None
self.action_opcode = None
self._segment_path = lambda: "mark-detail"
def __setattr__(self, name, value):
self._perform_setattr(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.PoliceConform.MarkDetail, ['mark_value', 'action_opcode'], name, value)
class PoliceExceed(Entity):
"""
Police exceed mark
.. attribute:: action_type
Action type
**type**\: :py:class:`Action <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.Action>`
.. attribute:: mark_detail
Mark value
**type**\: list of :py:class:`MarkDetail <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.PoliceExceed.MarkDetail>`
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
super(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.PoliceExceed, self).__init__()
self.yang_name = "police-exceed"
self.yang_parent_name = "marking"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("mark-detail", ("mark_detail", PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.PoliceExceed.MarkDetail))])
self._leafs = OrderedDict([
('action_type', YLeaf(YType.enumeration, 'action-type')),
])
self.action_type = None
self.mark_detail = YList(self)
self._segment_path = lambda: "police-exceed"
def __setattr__(self, name, value):
self._perform_setattr(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.PoliceExceed, ['action_type'], name, value)
class MarkDetail(Entity):
"""
Mark value
.. attribute:: mark_value
Mark value
**type**\: int
**range:** 0..255
.. attribute:: action_opcode
Action opcode
**type**\: :py:class:`ActionOpcode <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.ActionOpcode>`
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
super(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.PoliceExceed.MarkDetail, self).__init__()
self.yang_name = "mark-detail"
self.yang_parent_name = "police-exceed"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('mark_value', YLeaf(YType.uint8, 'mark-value')),
('action_opcode', YLeaf(YType.enumeration, 'action-opcode')),
])
self.mark_value = None
self.action_opcode = None
self._segment_path = lambda: "mark-detail"
def __setattr__(self, name, value):
self._perform_setattr(PlatformQos.Nodes.Node.Interfaces.Interface.Output.SkywarpQosPolicyClass.QosShowPclassSt.Marking.PoliceExceed.MarkDetail, ['mark_value', 'action_opcode'], name, value)
class Input(Entity):
"""
QoS policy direction ingress
.. attribute:: header
QoS EA policy header
**type**\: :py:class:`Header <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Input.Header>`
.. attribute:: interface_parameters
QoS Interface Parameters
**type**\: :py:class:`InterfaceParameters <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Input.InterfaceParameters>`
.. attribute:: skywarp_qos_policy_class
Skywarp QoS policy class details
**type**\: :py:class:`SkywarpQosPolicyClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Input.SkywarpQosPolicyClass>`
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
super(PlatformQos.Nodes.Node.Interfaces.Interface.Input, self).__init__()
self.yang_name = "input"
self.yang_parent_name = "interface"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([("header", ("header", PlatformQos.Nodes.Node.Interfaces.Interface.Input.Header)), ("interface-parameters", ("interface_parameters", PlatformQos.Nodes.Node.Interfaces.Interface.Input.InterfaceParameters)), ("skywarp-qos-policy-class", ("skywarp_qos_policy_class", PlatformQos.Nodes.Node.Interfaces.Interface.Input.SkywarpQosPolicyClass))])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict()
self.header = PlatformQos.Nodes.Node.Interfaces.Interface.Input.Header()
self.header.parent = self
self._children_name_map["header"] = "header"
self._children_yang_names.add("header")
self.interface_parameters = PlatformQos.Nodes.Node.Interfaces.Interface.Input.InterfaceParameters()
self.interface_parameters.parent = self
self._children_name_map["interface_parameters"] = "interface-parameters"
self._children_yang_names.add("interface-parameters")
self.skywarp_qos_policy_class = PlatformQos.Nodes.Node.Interfaces.Interface.Input.SkywarpQosPolicyClass()
self.skywarp_qos_policy_class.parent = self
self._children_name_map["skywarp_qos_policy_class"] = "skywarp-qos-policy-class"
self._children_yang_names.add("skywarp-qos-policy-class")
self._segment_path = lambda: "input"
class Header(Entity):
    """
    QoS EA policy header

    .. attribute:: interface_name

        Interface Name
        **type**\: str
        **length:** 0..101

    .. attribute:: policy_name

        Policy name
        **type**\: str
        **length:** 0..65

    .. attribute:: direction

        Direction
        **type**\: str
        **length:** 0..11

    .. attribute:: classes

        Number of classes
        **type**\: int
        **range:** 0..65535
    """

    _prefix = 'skp-qos-oper'
    _revision = '2016-02-18'

    def __init__(self):
        super(PlatformQos.Nodes.Node.Interfaces.Interface.Input.Header, self).__init__()

        # YANG bookkeeping: leaf-only container ("header" under "input"),
        # nested below a keyed list, so it has a list ancestor.
        self.yang_name = "header"
        self.yang_parent_name = "input"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_container_classes = OrderedDict([])
        self._child_list_classes = OrderedDict([])

        # Leaf registry: Python attribute name -> YANG leaf descriptor.
        self._leafs = OrderedDict([
            ('interface_name', YLeaf(YType.str, 'interface-name')),
            ('policy_name', YLeaf(YType.str, 'policy-name')),
            ('direction', YLeaf(YType.str, 'direction')),
            ('classes', YLeaf(YType.uint16, 'classes')),
        ])

        # Leaf values; presumably populated when operational data is read
        # from the device (generated binding) — None until then.
        self.interface_name = None
        self.policy_name = None
        self.direction = None
        self.classes = None
        self._segment_path = lambda: "header"

    def __setattr__(self, name, value):
        # Route leaf assignment through YDK's validation machinery.
        self._perform_setattr(PlatformQos.Nodes.Node.Interfaces.Interface.Input.Header, ['interface_name', 'policy_name', 'direction', 'classes'], name, value)
class InterfaceParameters(Entity):
    """
    QoS Interface Parameters

    .. attribute:: interface_config_rate

        Interface Configured Rate
        **type**\: :py:class:`InterfaceConfigRate <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Input.InterfaceParameters.InterfaceConfigRate>`

    .. attribute:: interface_program_rate

        Interface Programmed Rate
        **type**\: :py:class:`InterfaceProgramRate <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Input.InterfaceParameters.InterfaceProgramRate>`

    .. attribute:: port_shaper_rate

        Port Shaper Rate
        **type**\: :py:class:`PortShaperRate <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Input.InterfaceParameters.PortShaperRate>`
    """

    _prefix = 'skp-qos-oper'
    _revision = '2016-02-18'

    def __init__(self):
        super(PlatformQos.Nodes.Node.Interfaces.Interface.Input.InterfaceParameters, self).__init__()

        # YANG bookkeeping: container holding three child rate containers
        # and no leafs of its own.
        self.yang_name = "interface-parameters"
        self.yang_parent_name = "input"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_container_classes = OrderedDict([("interface-config-rate", ("interface_config_rate", PlatformQos.Nodes.Node.Interfaces.Interface.Input.InterfaceParameters.InterfaceConfigRate)), ("interface-program-rate", ("interface_program_rate", PlatformQos.Nodes.Node.Interfaces.Interface.Input.InterfaceParameters.InterfaceProgramRate)), ("port-shaper-rate", ("port_shaper_rate", PlatformQos.Nodes.Node.Interfaces.Interface.Input.InterfaceParameters.PortShaperRate))])
        self._child_list_classes = OrderedDict([])
        self._leafs = OrderedDict()

        # Instantiate child containers eagerly and wire up parent links plus
        # the Python-name <-> YANG-name mapping used for serialization.
        self.interface_config_rate = PlatformQos.Nodes.Node.Interfaces.Interface.Input.InterfaceParameters.InterfaceConfigRate()
        self.interface_config_rate.parent = self
        self._children_name_map["interface_config_rate"] = "interface-config-rate"
        self._children_yang_names.add("interface-config-rate")

        self.interface_program_rate = PlatformQos.Nodes.Node.Interfaces.Interface.Input.InterfaceParameters.InterfaceProgramRate()
        self.interface_program_rate.parent = self
        self._children_name_map["interface_program_rate"] = "interface-program-rate"
        self._children_yang_names.add("interface-program-rate")

        self.port_shaper_rate = PlatformQos.Nodes.Node.Interfaces.Interface.Input.InterfaceParameters.PortShaperRate()
        self.port_shaper_rate.parent = self
        self._children_name_map["port_shaper_rate"] = "port-shaper-rate"
        self._children_yang_names.add("port-shaper-rate")
        self._segment_path = lambda: "interface-parameters"

    class InterfaceConfigRate(Entity):
        """
        Interface Configured Rate

        .. attribute:: value

            Config value
            **type**\: int
            **range:** 0..4294967295

        .. attribute:: unit

            Config unit
            **type**\: :py:class:`QosUnit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.QosUnit>`
        """

        _prefix = 'skp-qos-oper'
        _revision = '2016-02-18'

        def __init__(self):
            super(PlatformQos.Nodes.Node.Interfaces.Interface.Input.InterfaceParameters.InterfaceConfigRate, self).__init__()

            # Leaf-only container: a (value, unit) pair.
            self.yang_name = "interface-config-rate"
            self.yang_parent_name = "interface-parameters"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_container_classes = OrderedDict([])
            self._child_list_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('value', YLeaf(YType.uint32, 'value')),
                ('unit', YLeaf(YType.enumeration, 'unit')),
            ])
            self.value = None
            self.unit = None
            self._segment_path = lambda: "interface-config-rate"

        def __setattr__(self, name, value):
            # Route leaf assignment through YDK's validation machinery.
            self._perform_setattr(PlatformQos.Nodes.Node.Interfaces.Interface.Input.InterfaceParameters.InterfaceConfigRate, ['value', 'unit'], name, value)

    class InterfaceProgramRate(Entity):
        """
        Interface Programmed Rate

        .. attribute:: value

            Config value
            **type**\: int
            **range:** 0..4294967295

        .. attribute:: unit

            Config unit
            **type**\: :py:class:`QosUnit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.QosUnit>`
        """

        _prefix = 'skp-qos-oper'
        _revision = '2016-02-18'

        def __init__(self):
            super(PlatformQos.Nodes.Node.Interfaces.Interface.Input.InterfaceParameters.InterfaceProgramRate, self).__init__()

            # Leaf-only container: a (value, unit) pair.
            self.yang_name = "interface-program-rate"
            self.yang_parent_name = "interface-parameters"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_container_classes = OrderedDict([])
            self._child_list_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('value', YLeaf(YType.uint32, 'value')),
                ('unit', YLeaf(YType.enumeration, 'unit')),
            ])
            self.value = None
            self.unit = None
            self._segment_path = lambda: "interface-program-rate"

        def __setattr__(self, name, value):
            # Route leaf assignment through YDK's validation machinery.
            self._perform_setattr(PlatformQos.Nodes.Node.Interfaces.Interface.Input.InterfaceParameters.InterfaceProgramRate, ['value', 'unit'], name, value)

    class PortShaperRate(Entity):
        """
        Port Shaper Rate

        .. attribute:: value

            Config value
            **type**\: int
            **range:** 0..4294967295

        .. attribute:: unit

            Config unit
            **type**\: :py:class:`QosUnit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.QosUnit>`
        """

        _prefix = 'skp-qos-oper'
        _revision = '2016-02-18'

        def __init__(self):
            super(PlatformQos.Nodes.Node.Interfaces.Interface.Input.InterfaceParameters.PortShaperRate, self).__init__()

            # Leaf-only container: a (value, unit) pair.
            self.yang_name = "port-shaper-rate"
            self.yang_parent_name = "interface-parameters"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_container_classes = OrderedDict([])
            self._child_list_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('value', YLeaf(YType.uint32, 'value')),
                ('unit', YLeaf(YType.enumeration, 'unit')),
            ])
            self.value = None
            self.unit = None
            self._segment_path = lambda: "port-shaper-rate"

        def __setattr__(self, name, value):
            # Route leaf assignment through YDK's validation machinery.
            self._perform_setattr(PlatformQos.Nodes.Node.Interfaces.Interface.Input.InterfaceParameters.PortShaperRate, ['value', 'unit'], name, value)
class SkywarpQosPolicyClass(Entity):
"""
Skywarp QoS policy class details
.. attribute:: qos_show_pclass_st
qos show pclass st
**type**\: list of :py:class:`QosShowPclassSt <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Input.SkywarpQosPolicyClass.QosShowPclassSt>`
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
super(PlatformQos.Nodes.Node.Interfaces.Interface.Input.SkywarpQosPolicyClass, self).__init__()
self.yang_name = "skywarp-qos-policy-class"
self.yang_parent_name = "input"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("qos-show-pclass-st", ("qos_show_pclass_st", PlatformQos.Nodes.Node.Interfaces.Interface.Input.SkywarpQosPolicyClass.QosShowPclassSt))])
self._leafs = OrderedDict()
self.qos_show_pclass_st = YList(self)
self._segment_path = lambda: "skywarp-qos-policy-class"
def __setattr__(self, name, value):
self._perform_setattr(PlatformQos.Nodes.Node.Interfaces.Interface.Input.SkywarpQosPolicyClass, [], name, value)
class QosShowPclassSt(Entity):
"""
qos show pclass st
.. attribute:: queue
QoS Queue parameters
**type**\: :py:class:`Queue <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Input.SkywarpQosPolicyClass.QosShowPclassSt.Queue>`
.. attribute:: shape
QoS EA Shaper parameters
**type**\: :py:class:`Shape <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Input.SkywarpQosPolicyClass.QosShowPclassSt.Shape>`
.. attribute:: wfq
QoS WFQ parameters
**type**\: :py:class:`Wfq <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Input.SkywarpQosPolicyClass.QosShowPclassSt.Wfq>`
.. attribute:: police
QoS Policer parameters
**type**\: :py:class:`Police <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Input.SkywarpQosPolicyClass.QosShowPclassSt.Police>`
.. attribute:: marking
QoS Mark parameters
**type**\: :py:class:`Marking <ydk.models.cisco_ios_xr.Cisco_IOS_XR_skp_qos_oper.PlatformQos.Nodes.Node.Interfaces.Interface.Input.SkywarpQosPolicyClass.QosShowPclassSt.Marking>`
.. attribute:: class_level
Class level
**type**\: int
**range:** 0..255
.. attribute:: class_name
Class name
**type**\: str
**length:** 0..65
"""
_prefix = 'skp-qos-oper'
_revision = '2016-02-18'
def __init__(self):
super(PlatformQos.Nodes.Node.Interfaces.Interface.Input.SkywarpQosPolicyClass.QosShowPclassSt, self).__init__()
self.yang_name = "qos-show-pclass-st"
self.yang_parent_name = "skywarp-qos-policy-class"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([("queue", ("queue", PlatformQos.Nodes.Node.Interfaces.Interface.Input.SkywarpQosPolicyClass.QosShowPclassSt.Queue)), ("shape", ("shape", PlatformQos.Nodes.Node.Interfaces.Interface.Input.SkywarpQosPolicyClass.QosShowPclassSt.Shape)), ("wfq", ("wfq", PlatformQos.Nodes.Node.Interfaces.Interface.Input.SkywarpQosPolicyClass.QosShowPclassSt.Wfq)), ("police", ("police", PlatformQos.Nodes.Node.Interfaces.Interface.Input.SkywarpQosPolicyClass.QosShowPclassSt.Police)), ("marking", ("marking", PlatformQos.Nodes.Node.Interfaces.Interface.Input.SkywarpQosPolicyClass.QosShowPclassSt.Marking))])
self._child_list_classes | |
Calculation of the right part """
zeros = '1' + len(rightmost)*'0'
length = int(zeros)
next = int(rightmost)/length
list_of_numbers = []
length = 0
while length <= 20:
if next * 2< 1:
list_of_numbers.append(0)
next = next * 2
else:
next = next * 2
num = int(next)
list_of_numbers.append(1)
next = next - num
pass
length += 1
numbers2 = ''
for val in range(len(list_of_numbers)):
number = str(list_of_numbers[val])
numbers2 = numbers2 + number
# print(f"The Decimal -> Binary Conversion is {numbers}.{numbers2.rstrip('0')}")
color = random.choice([RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN])
return f" {BOLD} {color} The Decimal -> Binary Conversion is {numbers}.{numbers2.rstrip('0')} {RESET}"
# Decimal -> Octal Conversion
def Decimal_to_Octal(x : str) -> str:
    """
    It Converts the Given Decimal Number into Octal Number System of Base `8` and takes input in `str` form

    Args:
        x `(str)` : It is the Positional Argument by order which stores the Decimal Input from User.

    Returns (str) : The Output `returned` is in the form of a `str` which is the Octal Converted Number.

    Raises:
        ValueError: if `x` contains characters other than digits and at most one '.'.
    """
    # Split into integer and fractional text.  The original implementation
    # assumed a '.' was always present and crashed with ValueError
    # (int('') on the empty fractional part) for plain integers like "10".
    integer_text, _, fraction_text = x.partition(".")
    leftmost = int(integer_text) if integer_text else 0

    # Integer part: repeated division by 8, remainders collected in
    # reverse order, then packed into a base-10-looking int of octal digits.
    octal_digits = []
    quotient = leftmost
    while quotient != 0:
        octal_digits.append(quotient % 8)
        quotient //= 8
    octal_digits.reverse()
    numbers = 0
    for digit in octal_digits:
        numbers = numbers * 10 + digit

    # Fractional part: repeated multiplication by 8, taking the integer
    # digit at each step.  Capped at 21 digits, like the original loop.
    if fraction_text:
        fraction = int(fraction_text) / int("1" + "0" * len(fraction_text))
    else:
        fraction = 0.0  # no fractional part given
    frac_digits = []
    for _ in range(21):
        fraction *= 8
        digit = int(fraction)
        frac_digits.append(digit)
        fraction -= digit
    numbers2 = "".join(str(d) for d in frac_digits)

    # print(f"The Decimal -> Octal Conversion is {numbers}.{numbers2.rstrip('0')}")
    color = random.choice([RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN])
    return f" {BOLD} {color} The Decimal -> Octal Conversion is {numbers}.{numbers2.rstrip('0')} {RESET}"
# Decimal -> Hexadecimal Conversion
def Decimal_to_Hexadecimal(x : str) -> str:
    """
    It Converts the Given Decimal Number into Hexadecimal Number System of Base `16` and takes input in `str` form

    Args:
        x `(str)` : It is the Positional Argument by order which stores the Decimal Input from User.

    Returns (str) : The Output `returned` is in the form of a `str` which is the Hexadecimal Converted Number.

    Raises:
        ValueError: if `x` contains characters other than digits and at most one '.'.
    """
    # Digit map for values above 9 (renamed from `dict`, which shadowed
    # the builtin).
    hex_letters = {10: "A", 11: "B", 12: "C", 13: "D", 14: "E", 15: "F"}

    # Split into integer and fractional text.  The original implementation
    # assumed a '.' was always present and crashed with ValueError
    # (int('') on the empty fractional part) for plain integers like "10".
    integer_text, _, fraction_text = x.partition(".")
    leftmost = int(integer_text) if integer_text else 0

    # Integer part: repeated division by 16, remainders collected in
    # reverse order; remainders 10..15 are rendered as letters.
    hex_digits = []
    quotient = leftmost
    while quotient != 0:
        rem = quotient % 16
        hex_digits.append(hex_letters.get(rem, rem))
        quotient //= 16
    hex_digits.reverse()
    numbers = "".join(str(d) for d in hex_digits)

    # Fractional part: repeated multiplication by 16, taking the integer
    # digit at each step (letters for 10..15).  Capped at 21 digits,
    # like the original loop.
    if fraction_text:
        fraction = int(fraction_text) / int("1" + "0" * len(fraction_text))
    else:
        fraction = 0.0  # no fractional part given
    frac_digits = []
    for _ in range(21):
        fraction *= 16
        digit = int(fraction)
        frac_digits.append(hex_letters.get(digit, digit))
        fraction -= digit
    numbers2 = "".join(str(d) for d in frac_digits)

    # print(f"The Decimal -> Hexadecimal Conversion is {numbers}.{numbers2.rstrip('0')}")
    color = random.choice([RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN])
    return f" {BOLD} {color} The Decimal -> Hexadecimal Conversion is {numbers}.{numbers2.rstrip('0')} {RESET}"
# Octal -> [ ] Conversion
# Octal -> Decimal Conversion
def Octal_to_Decimal(x : str) -> str:
    """
    It Converts the Given Octal Number into Decimal Number System of Base `10` and takes input in `str` form

    Args:
        x `(str)` : It is the Positional Argument by order which stores the Octal Input from User.

    Returns (str) : The Output `returned` is in the form of a `str` which is the Decimal Converted Number,
    or `None` when the input is not a valid octal number (matching the original's implicit behaviour).

    Raises:
        ValueError: if `x` contains characters other than digits and at most one '.'.
    """
    # Split into integer and fractional text.  Keeping the fractional part
    # as a *string* fixes a bug in the original, which packed it into an
    # int and therefore lost leading zeros: octal "0.07" was decoded as
    # 7/8 instead of the correct 7/64.
    integer_text, _, fraction_text = x.partition(".")

    # int(ch) raises ValueError on non-digit characters, like the original.
    left_digits = [int(ch) for ch in integer_text]
    right_digits = [int(ch) for ch in fraction_text]

    # Octal digits must be 0..7; the original fell through (returning None
    # implicitly) on invalid input, so keep that contract but be explicit.
    if any(d > 7 for d in left_digits) or any(d > 7 for d in right_digits):
        return None

    # Integer part: Horner's rule over base 8.
    sum1 = 0
    for d in left_digits:
        sum1 = sum1 * 8 + d

    # Fractional part: digit i (1-based) is weighted by 8**-i.
    # Start from 0.0 so the result formats as a float, as before.
    sum2 = 0.0
    for j, d in enumerate(right_digits, start=1):
        sum2 += d * pow(8, -j)

    # print(f"The Octal -> Decimal Conversion is {sum1+sum2}")
    color = random.choice([RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN])
    return f" {BOLD} {color} The Octal -> Decimal Conversion is {sum1+sum2} {RESET}"
# Octal -> Binary Conversion
def Octal_to_Binary(x : str) -> str:
"""
It Converts the Given Octal Number into Binary Number System of Base `2` and takes input in `str` form
Args:
x `(str)` : It is the Positional Argument by order which stores the Octal Input from User.
Returns (str) : | |
SALES_QUOTE_LINES = "salesQuoteLines"
SHIPMENT_METHOD = "shipmentMethod"
class Enum31(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property-name choices — presumably OData $select values (generated code; case-insensitive lookup)."""

    ID = "id"
    AMOUNT = "amount"
    APPLIES_TO_INVOICE_ID = "appliesToInvoiceId"
    APPLIES_TO_INVOICE_NUMBER = "appliesToInvoiceNumber"
    COMMENT = "comment"
    CONTACT_ID = "contactId"
    CUSTOMER_ID = "customerId"
    CUSTOMER_NUMBER = "customerNumber"
    DESCRIPTION = "description"
    DOCUMENT_NUMBER = "documentNumber"
    EXTERNAL_DOCUMENT_NUMBER = "externalDocumentNumber"
    JOURNAL_DISPLAY_NAME = "journalDisplayName"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    LINE_NUMBER = "lineNumber"
    POSTING_DATE = "postingDate"
    CUSTOMER = "customer"
class Enum310(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property-name choices — presumably OData $select values (generated code; case-insensitive lookup)."""

    ID = "id"
    AMOUNT_DECIMAL_PLACES = "amountDecimalPlaces"
    AMOUNT_ROUNDING_PRECISION = "amountRoundingPrecision"
    CODE = "code"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    SYMBOL = "symbol"
class Enum311(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property- and relation-name choices — presumably OData $select values (generated code)."""

    ID = "id"
    ADDRESS = "address"
    BLOCKED = "blocked"
    CURRENCY_CODE = "currencyCode"
    CURRENCY_ID = "currencyId"
    DISPLAY_NAME = "displayName"
    EMAIL = "email"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    NUMBER = "number"
    PAYMENT_METHOD_ID = "paymentMethodId"
    PAYMENT_TERMS_ID = "paymentTermsId"
    PHONE_NUMBER = "phoneNumber"
    SHIPMENT_METHOD_ID = "shipmentMethodId"
    TAX_AREA_DISPLAY_NAME = "taxAreaDisplayName"
    TAX_AREA_ID = "taxAreaId"
    TAX_LIABLE = "taxLiable"
    TAX_REGISTRATION_NUMBER = "taxRegistrationNumber"
    TYPE = "type"
    WEBSITE = "website"
    CURRENCY = "currency"
    PAYMENT_METHOD = "paymentMethod"
    PAYMENT_TERM = "paymentTerm"
    PICTURE = "picture"
    SHIPMENT_METHOD = "shipmentMethod"
class Enum312(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Relation choices ('*' = all) — presumably OData $expand values (generated code)."""

    ASTERISK = "*"
    CURRENCY = "currency"
    PAYMENT_METHOD = "paymentMethod"
    PAYMENT_TERM = "paymentTerm"
    PICTURE = "picture"
    SHIPMENT_METHOD = "shipmentMethod"
class Enum313(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property-name choices — presumably OData $select values (generated code; case-insensitive lookup)."""

    ID = "id"
    AMOUNT_DECIMAL_PLACES = "amountDecimalPlaces"
    AMOUNT_ROUNDING_PRECISION = "amountRoundingPrecision"
    CODE = "code"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    SYMBOL = "symbol"
class Enum314(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property-name choices — presumably OData $select values (generated code; case-insensitive lookup)."""

    ID = "id"
    CODE = "code"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
class Enum315(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property-name choices — presumably OData $select values (generated code; case-insensitive lookup)."""

    ID = "id"
    CALCULATE_DISCOUNT_ON_CREDIT_MEMOS = "calculateDiscountOnCreditMemos"
    CODE = "code"
    DISCOUNT_DATE_CALCULATION = "discountDateCalculation"
    DISCOUNT_PERCENT = "discountPercent"
    DISPLAY_NAME = "displayName"
    DUE_DATE_CALCULATION = "dueDateCalculation"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
class Enum316(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Sort-order choices: each property plus a ' desc' (descending) variant — presumably OData $orderby values (generated code)."""

    ID = "id"
    ID_DESC = "id desc"
    CONTENT = "content"
    CONTENT_DESC = "content desc"
    CONTENT_TYPE = "contentType"
    CONTENT_TYPE_DESC = "contentType desc"
    HEIGHT = "height"
    HEIGHT_DESC = "height desc"
    WIDTH = "width"
    WIDTH_DESC = "width desc"
class Enum317(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property-name choices — presumably OData $select values (generated code; case-insensitive lookup)."""

    ID = "id"
    CONTENT = "content"
    CONTENT_TYPE = "contentType"
    HEIGHT = "height"
    WIDTH = "width"
class Enum318(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property-name choices — presumably OData $select values (generated code; case-insensitive lookup)."""

    ID = "id"
    CONTENT = "content"
    CONTENT_TYPE = "contentType"
    HEIGHT = "height"
    WIDTH = "width"
class Enum319(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property-name choices — presumably OData $select values (generated code; case-insensitive lookup)."""

    ID = "id"
    CODE = "code"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
class Enum320(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property-name choices — presumably OData $select values (generated code; case-insensitive lookup)."""

    ID = "id"
    CALCULATE_DISCOUNT_ON_CREDIT_MEMOS = "calculateDiscountOnCreditMemos"
    CODE = "code"
    DISCOUNT_DATE_CALCULATION = "discountDateCalculation"
    DISCOUNT_PERCENT = "discountPercent"
    DISPLAY_NAME = "displayName"
    DUE_DATE_CALCULATION = "dueDateCalculation"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
class Enum321(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Sort-order choices: each property plus a ' desc' (descending) variant — presumably OData $orderby values (generated code)."""

    ID = "id"
    ID_DESC = "id desc"
    ACCOUNT_ID = "accountId"
    ACCOUNT_ID_DESC = "accountId desc"
    AMOUNT_EXCLUDING_TAX = "amountExcludingTax"
    AMOUNT_EXCLUDING_TAX_DESC = "amountExcludingTax desc"
    AMOUNT_INCLUDING_TAX = "amountIncludingTax"
    AMOUNT_INCLUDING_TAX_DESC = "amountIncludingTax desc"
    DESCRIPTION = "description"
    DESCRIPTION_DESC = "description desc"
    DISCOUNT_AMOUNT = "discountAmount"
    DISCOUNT_AMOUNT_DESC = "discountAmount desc"
    DISCOUNT_APPLIED_BEFORE_TAX = "discountAppliedBeforeTax"
    DISCOUNT_APPLIED_BEFORE_TAX_DESC = "discountAppliedBeforeTax desc"
    DISCOUNT_PERCENT = "discountPercent"
    DISCOUNT_PERCENT_DESC = "discountPercent desc"
    DOCUMENT_ID = "documentId"
    DOCUMENT_ID_DESC = "documentId desc"
    ITEM_ID = "itemId"
    ITEM_ID_DESC = "itemId desc"
    LINE_TYPE = "lineType"
    LINE_TYPE_DESC = "lineType desc"
    NET_AMOUNT = "netAmount"
    NET_AMOUNT_DESC = "netAmount desc"
    NET_AMOUNT_INCLUDING_TAX = "netAmountIncludingTax"
    NET_AMOUNT_INCLUDING_TAX_DESC = "netAmountIncludingTax desc"
    NET_TAX_AMOUNT = "netTaxAmount"
    NET_TAX_AMOUNT_DESC = "netTaxAmount desc"
    QUANTITY = "quantity"
    QUANTITY_DESC = "quantity desc"
    SEQUENCE = "sequence"
    SEQUENCE_DESC = "sequence desc"
    TAX_CODE = "taxCode"
    TAX_CODE_DESC = "taxCode desc"
    TAX_PERCENT = "taxPercent"
    TAX_PERCENT_DESC = "taxPercent desc"
    TOTAL_TAX_AMOUNT = "totalTaxAmount"
    TOTAL_TAX_AMOUNT_DESC = "totalTaxAmount desc"
    UNIT_OF_MEASURE_ID = "unitOfMeasureId"
    UNIT_OF_MEASURE_ID_DESC = "unitOfMeasureId desc"
    UNIT_PRICE = "unitPrice"
    UNIT_PRICE_DESC = "unitPrice desc"
class Enum322(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property- and relation-name choices — presumably OData $select values (generated code)."""

    ID = "id"
    ACCOUNT_ID = "accountId"
    AMOUNT_EXCLUDING_TAX = "amountExcludingTax"
    AMOUNT_INCLUDING_TAX = "amountIncludingTax"
    DESCRIPTION = "description"
    DISCOUNT_AMOUNT = "discountAmount"
    DISCOUNT_APPLIED_BEFORE_TAX = "discountAppliedBeforeTax"
    DISCOUNT_PERCENT = "discountPercent"
    DOCUMENT_ID = "documentId"
    ITEM_ID = "itemId"
    LINE_TYPE = "lineType"
    NET_AMOUNT = "netAmount"
    NET_AMOUNT_INCLUDING_TAX = "netAmountIncludingTax"
    NET_TAX_AMOUNT = "netTaxAmount"
    QUANTITY = "quantity"
    SEQUENCE = "sequence"
    TAX_CODE = "taxCode"
    TAX_PERCENT = "taxPercent"
    TOTAL_TAX_AMOUNT = "totalTaxAmount"
    UNIT_OF_MEASURE_ID = "unitOfMeasureId"
    UNIT_PRICE = "unitPrice"
    ACCOUNT = "account"
    ITEM = "item"
class Enum323(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Relation choices ('*' = all) — presumably OData $expand values (generated code)."""

    ASTERISK = "*"
    ACCOUNT = "account"
    ITEM = "item"
class Enum324(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property- and relation-name choices — presumably OData $select values (generated code)."""

    ID = "id"
    ACCOUNT_ID = "accountId"
    AMOUNT_EXCLUDING_TAX = "amountExcludingTax"
    AMOUNT_INCLUDING_TAX = "amountIncludingTax"
    DESCRIPTION = "description"
    DISCOUNT_AMOUNT = "discountAmount"
    DISCOUNT_APPLIED_BEFORE_TAX = "discountAppliedBeforeTax"
    DISCOUNT_PERCENT = "discountPercent"
    DOCUMENT_ID = "documentId"
    ITEM_ID = "itemId"
    LINE_TYPE = "lineType"
    NET_AMOUNT = "netAmount"
    NET_AMOUNT_INCLUDING_TAX = "netAmountIncludingTax"
    NET_TAX_AMOUNT = "netTaxAmount"
    QUANTITY = "quantity"
    SEQUENCE = "sequence"
    TAX_CODE = "taxCode"
    TAX_PERCENT = "taxPercent"
    TOTAL_TAX_AMOUNT = "totalTaxAmount"
    UNIT_OF_MEASURE_ID = "unitOfMeasureId"
    UNIT_PRICE = "unitPrice"
    ACCOUNT = "account"
    ITEM = "item"
class Enum325(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Relation choices ('*' = all) — presumably OData $expand values (generated code)."""

    ASTERISK = "*"
    ACCOUNT = "account"
    ITEM = "item"
class Enum326(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property-name choices — presumably OData $select values (generated code; case-insensitive lookup)."""

    ID = "id"
    BLOCKED = "blocked"
    CATEGORY = "category"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    NUMBER = "number"
    SUB_CATEGORY = "subCategory"
class Enum327(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property- and relation-name choices — presumably OData $select values (generated code)."""

    ID = "id"
    BASE_UNIT_OF_MEASURE_ID = "baseUnitOfMeasureId"
    BLOCKED = "blocked"
    DISPLAY_NAME = "displayName"
    GTIN = "gtin"
    INVENTORY = "inventory"
    ITEM_CATEGORY_CODE = "itemCategoryCode"
    ITEM_CATEGORY_ID = "itemCategoryId"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    NUMBER = "number"
    PRICE_INCLUDES_TAX = "priceIncludesTax"
    TAX_GROUP_CODE = "taxGroupCode"
    TAX_GROUP_ID = "taxGroupId"
    TYPE = "type"
    UNIT_COST = "unitCost"
    UNIT_PRICE = "unitPrice"
    ITEM_CATEGORY = "itemCategory"
    PICTURE = "picture"
class Enum328(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Relation choices ('*' = all) — presumably OData $expand values (generated code)."""

    ASTERISK = "*"
    ITEM_CATEGORY = "itemCategory"
    PICTURE = "picture"
class Enum329(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property-name choices — presumably OData $select values (generated code; case-insensitive lookup)."""

    ID = "id"
    CODE = "code"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
class Enum33(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property-name choices — presumably OData $select values (generated code; case-insensitive lookup)."""

    ID = "id"
    AMOUNT = "amount"
    APPLIES_TO_INVOICE_ID = "appliesToInvoiceId"
    APPLIES_TO_INVOICE_NUMBER = "appliesToInvoiceNumber"
    COMMENT = "comment"
    CONTACT_ID = "contactId"
    CUSTOMER_ID = "customerId"
    CUSTOMER_NUMBER = "customerNumber"
    DESCRIPTION = "description"
    DOCUMENT_NUMBER = "documentNumber"
    EXTERNAL_DOCUMENT_NUMBER = "externalDocumentNumber"
    JOURNAL_DISPLAY_NAME = "journalDisplayName"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    LINE_NUMBER = "lineNumber"
    POSTING_DATE = "postingDate"
    CUSTOMER = "customer"
class Enum330(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Sort-order choices: each property plus a ' desc' (descending) variant — presumably OData $orderby values (generated code)."""

    ID = "id"
    ID_DESC = "id desc"
    CONTENT = "content"
    CONTENT_DESC = "content desc"
    CONTENT_TYPE = "contentType"
    CONTENT_TYPE_DESC = "contentType desc"
    HEIGHT = "height"
    HEIGHT_DESC = "height desc"
    WIDTH = "width"
    WIDTH_DESC = "width desc"
class Enum331(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property-name choices — presumably OData $select values (generated code; case-insensitive lookup)."""

    ID = "id"
    CONTENT = "content"
    CONTENT_TYPE = "contentType"
    HEIGHT = "height"
    WIDTH = "width"
class Enum332(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property-name choices — presumably OData $select values (generated code; case-insensitive lookup)."""

    ID = "id"
    CONTENT = "content"
    CONTENT_TYPE = "contentType"
    HEIGHT = "height"
    WIDTH = "width"
class Enum333(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property-name choices — presumably OData $select values (generated code; case-insensitive lookup)."""

    ID = "id"
    CODE = "code"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
class Enum334(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Sort-order choices: each property plus a ' desc' (descending) variant — presumably OData $orderby values (generated code)."""

    ID = "id"
    ID_DESC = "id desc"
    CODE = "code"
    CODE_DESC = "code desc"
    DISPLAY_NAME = "displayName"
    DISPLAY_NAME_DESC = "displayName desc"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
class Enum335(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property-name choices — presumably OData $select values (generated code; case-insensitive lookup)."""

    ID = "id"
    CODE = "code"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
class Enum336(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property-name choices — presumably OData $select values (generated code; case-insensitive lookup)."""

    ID = "id"
    CODE = "code"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
class Enum337(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Sort-order choices: each property plus a ' desc' (descending) variant — presumably OData $orderby values (generated code)."""

    ID = "id"
    ID_DESC = "id desc"
    CODE = "code"
    CODE_DESC = "code desc"
    DISPLAY_NAME = "displayName"
    DISPLAY_NAME_DESC = "displayName desc"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
    TAX_TYPE = "taxType"
    TAX_TYPE_DESC = "taxType desc"
class Enum338(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property-name choices — presumably OData $select values (generated code; case-insensitive lookup)."""

    ID = "id"
    CODE = "code"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    TAX_TYPE = "taxType"
class Enum339(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property-name choices — presumably OData $select values (generated code; case-insensitive lookup)."""

    ID = "id"
    CODE = "code"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    TAX_TYPE = "taxType"
class Enum340(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Sort-order choices: each property plus a ' desc' (descending) variant — presumably OData $orderby values (generated code)."""

    ID = "id"
    ID_DESC = "id desc"
    CODE = "code"
    CODE_DESC = "code desc"
    DISPLAY_NAME = "displayName"
    DISPLAY_NAME_DESC = "displayName desc"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
    TAX_TYPE = "taxType"
    TAX_TYPE_DESC = "taxType desc"
class Enum341(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property-name choices — presumably OData $select values (generated code; case-insensitive lookup)."""

    ID = "id"
    CODE = "code"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    TAX_TYPE = "taxType"
class Enum342(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property-name choices — presumably OData $select values (generated code; case-insensitive lookup)."""

    ID = "id"
    CODE = "code"
    DISPLAY_NAME = "displayName"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    TAX_TYPE = "taxType"
class Enum343(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Sort-order choices: each property plus a ' desc' (descending) variant — presumably OData $orderby values (generated code)."""

    ID = "id"
    ID_DESC = "id desc"
    CODE = "code"
    CODE_DESC = "code desc"
    DISPLAY_NAME = "displayName"
    DISPLAY_NAME_DESC = "displayName desc"
    INTERNATIONAL_STANDARD_CODE = "internationalStandardCode"
    INTERNATIONAL_STANDARD_CODE_DESC = "internationalStandardCode desc"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
    LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
class Enum344(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property-name choices — presumably OData $select values (generated code; case-insensitive lookup)."""

    ID = "id"
    CODE = "code"
    DISPLAY_NAME = "displayName"
    INTERNATIONAL_STANDARD_CODE = "internationalStandardCode"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
class Enum345(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """Property-name choices — presumably OData $select values (generated code; case-insensitive lookup)."""

    ID = "id"
    CODE = "code"
    DISPLAY_NAME = "displayName"
    INTERNATIONAL_STANDARD_CODE = "internationalStandardCode"
    LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
class Enum346(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
ID = "id"
ID_DESC = "id desc"
ADDRESS = "address"
ADDRESS_DESC = "address desc"
BALANCE = "balance"
BALANCE_DESC = "balance desc"
BLOCKED = "blocked"
BLOCKED_DESC = "blocked desc"
CURRENCY_CODE = "currencyCode"
CURRENCY_CODE_DESC = "currencyCode desc"
CURRENCY_ID = "currencyId"
CURRENCY_ID_DESC = "currencyId desc"
DISPLAY_NAME = "displayName"
DISPLAY_NAME_DESC = "displayName desc"
EMAIL = "email"
EMAIL_DESC = "email desc"
LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime"
LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc"
NUMBER = "number"
NUMBER_DESC = "number | |
getdata = GetData()
getdata.inventory = reply_inv
log.debug("send GetData in reply to Inv for %s item(s)" % len(reply_inv))
self.send_message(getdata)
else:
if self.have_all_block_data():
self.loop_exit()
def add_sender_info(self, sender_txhash, nulldata_vin_outpoint, sender_out_data):
    """
    Record sender information in our block info.

    @sender_txhash: txid of the sender
    @nulldata_vin_outpoint: the 'vout' index from the nulldata tx input that this transaction funded
    @sender_out_data: the bitcoind-style output dict ('value', 'scriptPubKey') that funded that input.

    Returns True.
    """
    assert sender_txhash in self.sender_info.keys(), "Missing sender info for %s" % sender_txhash
    assert nulldata_vin_outpoint in self.sender_info[sender_txhash], "Missing outpoint %s for sender %s" % (nulldata_vin_outpoint, sender_txhash)

    # Where, inside block_info, does this sender's record belong?
    outpoint_rec = self.sender_info[sender_txhash][nulldata_vin_outpoint]
    block_hash = outpoint_rec['block_hash']
    relindex = outpoint_rec['relindex']
    relinput_index = outpoint_rec['relinput']

    spk = sender_out_data['scriptPubKey']
    value_in = sender_out_data['value']
    sender_rec = {
        "amount": value_in,
        "script_pubkey": spk['hex'],
        "script_type": spk['type'],
        "addresses": spk.get("addresses", []),
        "nulldata_vin_outpoint": nulldata_vin_outpoint,
        "txid": sender_txhash
    }

    tx_rec = self.block_info[block_hash]['txns'][relindex]

    # debit this tx's total value
    tx_rec['fee'] += int(value_in * 10**8)

    # remember this sender, but put it in the right place:
    # senders[i] must correspond to tx['vin'][i]
    tx_rec['senders'][relinput_index] = sender_rec
    self.block_info[block_hash]['num_senders'] += 1
    return True
def parse_tx_input(self, inp):
    """
    Given a tx input, turn it into an easy-to-read
    dict (i.e. like what bitcoind would give us):
    'vout', 'txid', and a 'scriptSig' with 'hex' and 'asm'.
    """
    scriptsig = binascii.hexlify(inp.signature_script)
    return {
        "vout": inp.previous_output.index,
        # previous txid, zero-padded to 64 hex characters
        "txid": "%064x" % inp.previous_output.out_hash,
        "scriptSig": {
            "hex": scriptsig,
            "asm": bits.tx_script_to_asm(scriptsig)
        }
    }
def parse_tx_output( self, i, outp ):
    """
    Given a tx output, turn it into an easy-to-read
    dict (i.e. like what bitcoind would give us).
    @i: index of this output within its transaction
    @outp: deserialized tx-output message

    Returns a dict with 'value' (BTC, as a Decimal), 'n', and
    'scriptPubKey' keys.
    """
    pubkey_hex = binascii.hexlify( outp.pk_script )
    # outp.value is in satoshis; report BTC like bitcoind does
    btc_value = Decimal(outp.value) / Decimal(10**8)
    return {
        "value": btc_value,
        "n": i,
        "scriptPubKey": bits.tx_output_parse_scriptPubKey( pubkey_hex )
    }
def parse_tx( self, txn, block_header, block_hash, txindex ):
    """
    Given a transaction message and its index in the block,
    go and create a "verbose" transaction structure
    containing all the information in a nice, easy-to-read
    dict (i.e. like what bitcoind would give us).
    Does not work on coinbase transactions.

    @txn: deserialized transaction message
    @block_header: dict with (at least) a 'timestamp' key; may be {} when unknown
    @block_hash: hash of the containing block ("" when unknown)
    @txindex: index of this tx within its block (-1 when unknown)

    Returns a bitcoind-style tx dict, extended with virtualchain-specific
    fields ('txindex', 'relindex', 'senders', 'fee', 'nulldata').
    """
    # re-serialize so we can report the hex encoding and byte size
    txn_serializer = TxSerializer()
    tx_bin = txn_serializer.serialize(txn)
    txdata = {
        "version": txn.version,
        "locktime": txn.lock_time,
        "hex": binascii.hexlify( tx_bin ),
        "txid": txn.calculate_hash(),
        "size": len( tx_bin ),
        "blockhash": block_hash,
        "blocktime": block_header.get('timestamp', 0),
        "vin": [],
        "vout": [],
        # non-standard; added by us for virtualchain
        "txindex": txindex,
        "relindex": None,
        "senders": None,
        "fee": 0,
        "nulldata": None
    }
    for inp in txn.tx_in:
        input_info = self.parse_tx_input( inp )
        txdata['vin'].append( input_info )
    for i in xrange(0, len(txn.tx_out)):
        outp = txn.tx_out[i]
        output_info = self.parse_tx_output( i, outp )
        txdata['vout'].append( output_info )
    # we know how many senders there have to be
    # (one per input; filled in later by add_sender_info)
    txdata['senders'] = [None] * len(txdata['vin'])
    return txdata
def make_sender_info( self, block_hash, txn, i ):
    """
    Make sender information bundle for a particular input of
    a nulldata transaction.
    We'll use it to go find the transaction output that
    funded the ith input of the given tx.

    @block_hash: hash of the block containing @txn
    @txn: parsed transaction dict (see parse_tx)
    @i: index of the input whose funding output we want

    Returns a dict with placeholder fields ('amount_in',
    'scriptPubKey', 'addresses') plus bookkeeping fields used to
    match the funding output back to input i of txn.
    """
    funded_input = txn['vin'][i]
    sender_info = {
        # to be filled in once the funding output is fetched...
        "amount_in": 0,
        "scriptPubKey": None,
        "addresses": None,
    }
    # bookkeeping for matching the input this sender funded
    sender_info["txindex"] = txn['txindex']
    sender_info["relindex"] = txn['relindex']
    sender_info["output_index"] = funded_input['vout']
    sender_info["block_hash"] = block_hash
    sender_info["relinput"] = i
    return sender_info
def handle_block( self, message_header, block ):
    """
    Got a block.
    * validate it
    * load its transactions
    * ask for each transaction's sender transaction

    @message_header: protocol message header (unused here)
    @block: deserialized block message
    """
    # nothing to do if we already have everything
    if self.have_all_block_data():
        self.loop_exit()
        return
    block_hash = block.calculate_hash()
    # is this a solicited block?
    if block_hash not in self.block_info.keys():
        log.error("Ignoring unsolicited block %s" % block_hash)
        return
    header = self.block_info[block_hash]['header']
    height = self.block_info[block_hash]['height']
    log.debug("handle block %s (%s)" % (height, block_hash))
    # does this block's transaction hashes match the merkle root?
    tx_hashes = [block.txns[i].calculate_hash() for i in xrange(0, len(block.txns))]
    mr = pybitcoin.MerkleTree( tx_hashes ).root()
    if mr != header['merkle_root']:
        log.error("Merkle root of %s (%s) mismatch: expected %s, got %s" % (block_hash, height, header['merkle_root'], mr))
        return
    # collect only the transactions that carry an OP_RETURN (nulldata) output
    nulldata_txs = []
    relindex = 0
    for txindex in xrange(0, len(block.txns)):
        txdata = self.parse_tx( block.txns[txindex], header, block_hash, txindex )
        # if there is no nulldata output, then we don't care about this one.
        has_nulldata = False
        nulldata_payload = None
        for outp in txdata['vout']:
            if outp['scriptPubKey']['type'] == 'nulldata':
                has_nulldata = True
                nulldata_payload = bitcoin.deserialize_script(outp['scriptPubKey']['hex'])[1]
                if type(nulldata_payload) not in [str, unicode]:
                    # this is a malformed OP_RETURN, where the varint that should follow OP_RETURN doesn't have the data behind it.
                    # just take the data after the varint, no matter what it is (i.e. "6a52" will be "")
                    nulldata_payload = outp['scriptPubKey']['hex'][4:]
        # count all txs processed
        self.num_txs_processed += 1
        if not has_nulldata:
            continue
        # remember nulldata
        txdata['nulldata'] = nulldata_payload
        # calculate total output (part of fee; will be debited when we discover the senders)
        txdata['fee'] -= sum( int(out['value'] * 10**8) for out in txdata['vout'] )
        # remember the relative tx index (i.e. the ith nulldata tx)
        txdata['relindex'] = relindex
        # do we actually want this?
        if self.tx_filter is not None:
            if not self.tx_filter( txdata ):
                continue
        # yup, we want it!
        relindex += 1
        nulldata_txs.append( txdata )
    self.block_info[block_hash]['txns'] = nulldata_txs
    self.block_info[block_hash]['num_txns'] = len(block.txns)
    self.block_info[block_hash]['num_senders'] = 0
    # get each input's transaction
    sender_txhashes = []
    for txn in self.block_info[block_hash]['txns']:
        for i in xrange(0, len(txn['vin'])):
            # record information about the transaction
            # that created this input (so we can go find
            # it later).
            inp = txn['vin'][i]
            sender_txid = inp['txid']
            inp_sender_outp = inp['vout']
            # track unique sender txids (for the log message below)
            if str(sender_txid) not in sender_txhashes:
                sender_txhashes.append( str(sender_txid) )
            sinfo = self.make_sender_info( block_hash, txn, i )
            if not self.sender_info.has_key(sender_txid):
                # map outpoint for this input to the tx info
                self.sender_info[sender_txid] = {}
            # sinfo is the information from the output in
            # the sender-tx that funded inp
            self.sender_info[sender_txid][inp_sender_outp] = sinfo
    # update accounting...
    self.num_blocks_received += 1
    self.block_info[block_hash]['handled'] = True
    log.debug("Request %s nulldata sender TXs" % len(sender_txhashes))
    if self.have_all_block_data():
        self.loop_exit()
    return
def fetch_txs_rpc( self, bitcoind_opts, txids ):
    """
    Fetch the given list of transactions
    via the JSON-RPC interface.

    @bitcoind_opts: dict with connection info ('bitcoind_user',
        'bitcoind_passwd', 'bitcoind_server', 'bitcoind_port', and
        optionally 'bitcoind_use_https')
    @txids: list of txids to fetch

    Return a dict of parsed transactions on success,
    keyed by txid.
    Return None on error
    """
    headers = {'content-type': 'application/json'}
    reqs = []
    ret = {}
    for i in xrange(0, len(txids)):
        txid = txids[i]
        if txid == "0000000000000000000000000000000000000000000000000000000000000000":
            # coinbase; we never send these
            # (synthesize a placeholder "coinbase" tx locally instead)
            ret[txid] = {
                "version": 1,
                "locktime": 0,
                "vin": [],
                "vout": [
                    {
                        "n": 0xffffffff,
                        "scriptPubKey": {
                            "asm": "",
                            "hex": "",
                            "type": "coinbase"
                        },
                        "value": 0 # not really 0, but we don't care about coinbases anyway
                    }
                ],
                "txid": txid,
            }
            continue
        # batched JSON-RPC; the request id is the index into txids
        req = {'method': 'getrawtransaction', 'params': [txid, 0], 'jsonrpc': '2.0', 'id': i}
        reqs.append( req )
    proto = "http"
    if bitcoind_opts.has_key('bitcoind_use_https') and bitcoind_opts['bitcoind_use_https']:
        proto = "https"
    server_url = "%s://%s:%s@%s:%s" % (proto, bitcoind_opts['bitcoind_user'], bitcoind_opts['bitcoind_passwd'], bitcoind_opts['bitcoind_server'], bitcoind_opts['bitcoind_port'])
    try:
        # NOTE(review): verify=False disables TLS certificate checking
        resp = requests.post( server_url, headers=headers, data=simplejson.dumps(reqs), verify=False )
    except Exception, e:
        log.exception(e)
        log.error("Failed to fetch %s transactions" % len(txids))
        return None
    # get responses
    try:
        resp_json = resp.json()
        assert type(resp_json) in [list]
    except Exception, e:
        log.exception(e)
        log.error("Failed to parse transactions")
        return None
    try:
        # NOTE(review): this loop variable shadows the HTTP response
        # object above (harmless here, but worth renaming)
        for resp in resp_json:
            assert 'result' in resp, "Missing result"
            txhex = resp['result']
            assert txhex is not None, "Invalid RPC response '%s' (for %s)" % (simplejson.dumps(resp), txids[resp['id']])
            try:
                # txid is the double-SHA256 of the raw tx, byte-reversed
                tx_bin = txhex.decode('hex')
                assert tx_bin is not None
                tx_hash_bin = pybitcoin.bin_double_sha256(tx_bin)[::-1]
                assert tx_hash_bin is not None
                tx_hash = tx_hash_bin.encode('hex')
                assert tx_hash is not None
            except Exception, e:
                log.error("Failed to calculate txid of %s" % txhex)
                raise
            # solicited transaction?
            assert tx_hash in txids, "Unsolicited transaction %s" % tx_hash
            # unique?
            if tx_hash in ret.keys():
                continue
            # parse from hex string
            # (no block context here: empty header, empty hash, txindex -1)
            txn_serializer = TxSerializer()
            txn = txn_serializer.deserialize( StringIO( binascii.unhexlify(txhex) ) )
            ret[tx_hash] = self.parse_tx( txn, {}, "", -1 )
    except Exception, e:
        log.exception(e)
        log.error("Failed to receive transactions")
        return None
    return ret
if __name__ == "__main__":
# test synchonize headers
try:
bitcoind_server = sys.argv[1]
headers_path = sys.argv[2]
height = int(sys.argv[3])
start_height = int(sys.argv[4])
except:
print >> sys.stderr, "Usage: %s bitcoind_server headers_path blockchain_height block_start_height" % sys.argv[0]
sys.exit(0)
log.setLevel(logging.DEBUG)
SPVClient.init( headers_path )
rc = SPVClient.sync_header_chain( headers_path, bitcoind_server, height )
if rc:
print "Headers are up to date with %s and seem to have sufficient proof-of-work" % height
host = bitcoind_server
port = 8333
if ":" in host:
host = bitcoind_server.split(":")[0]
port | |
# <filename> Ctrax/draw.py
# draw.py
# KMB 11/10/08
import os
import pdb
import sys
import matplotlib
#matplotlib.use( 'WXAgg' )
matplotlib.interactive( True )
import matplotlib.backends.backend_wxagg
import matplotlib.figure
import matplotlib.cm
import matplotlib.pyplot as plt
if hasattr( plt, 'tight_layout' ): # matplotlib >= 1.1
HAS_TIGHTLAYOUT = True
else:
HAS_TIGHTLAYOUT = False
import matplotlib.transforms as mtransforms
import numpy as num
import wx
from wx import xrc
import motmot.wxvalidatedtext.wxvalidatedtext as wxvt # part of Motmot
from params import params
#######################################################################
# PlotConstants
#######################################################################
class PlotConstants:
    """Mutable, module-wide plot settings shared by all plot panels."""
    def __init__( self ):
        # histogram bin counts
        self.vel_bins = 100
        self.dth_bins = 100
        # axis limits; the string 'a' means "autoscale"
        self.vel_x_min = self.vel_x_max = self.vel_y_min = self.vel_y_max = 'a'
        self.dth_x_min = self.dth_x_max = self.dth_y_min = self.dth_y_max = 'a'
        # position-histogram bin size (pixels) and axis limits
        self.pos_binsize = 50
        self.pos_x_min = self.pos_x_max = self.pos_y_min = self.pos_y_max = 'a'
        # most recent save path used by the file dialog (None until first save)
        self.filepath = None

# module-level singleton shared by all panels
const = PlotConstants()
#######################################################################
# PlotPanel
#######################################################################
# class from http://www.scipy.org/Matplotlib_figure_in_a_wx_panel
# although menu/right-click/save functionality was added here
class PlotPanel(wx.Panel):
    """The PlotPanel has a Figure and a Canvas. OnSize events simply set a
    flag, and the actually redrawing of the figure is triggered by an Idle event.

    Subclasses override draw() to do the actual plotting; this base class
    handles embedding, resizing, the save-as popup menu, and post-draw
    margin adjustment."""

    def __init__(self, parent, id=wx.ID_ANY, color=None,
                 dpi=None, style=wx.NO_FULL_REPAINT_ON_RESIZE, **kwargs):
        wx.Panel.__init__( self, parent, id=id, style=style, **kwargs )
        # embed a matplotlib figure in a wx canvas
        self.figure = matplotlib.figure.Figure( None, dpi )
        self.canvas = matplotlib.backends.backend_wxagg.FigureCanvasWxAgg( self, -1, self.figure )
        self.SetColor( color )
        # defer redraws to idle time; a resize event only sets a flag
        self.Bind( wx.EVT_IDLE, self._onIdle )
        self.Bind( wx.EVT_SIZE, self._onSize )
        self._resizeflag = True
        self._SetSize()
        # right-click pops up the "Save as..." menu
        self.canvas.Bind( wx.EVT_RIGHT_UP, self.OnRightMouseButton )
        # adjust margins after every matplotlib draw
        self.canvas.mpl_connect( 'draw_event', self.post_draw )

    def OnRightMouseButton( self, evt ):
        """Right mouse button pressed; pop up save menu."""
        menu = wx.Menu()
        file_save_item = wx.MenuItem( menu, wx.ID_ANY, "Save as..." )
        menu.AppendItem( file_save_item )
        self.canvas.Bind( wx.EVT_MENU, self.OnMenuSave )
        self.canvas.PopupMenu( menu )

    def OnMenuSave( self, evt ):
        """User has chosen to save this figure as an image file.
        Prompt for filename and save."""
        # the extension on filename determines file format
        # create text list of allowed file extensions
        # NOTE(review): '.pdf' description should read "portable document format"
        extensions = {'.eps': 'encapsulated postscript (*.eps)',
                      '.png': 'portable network graphics (*.png)',
                      '.pdf': 'portable data format (*.pdf)'}
        dialog_str = ''
        ext_list = []
        for ext, txt in extensions.iteritems():
            dialog_str += txt + '|*' + ext + '|'
            ext_list.append( ext )
        # drop the trailing '|' separator
        dialog_str = dialog_str[:-1]
        # make default filename
        (dirname, filename) = os.path.split( const.filepath )
        (basename, ext) = os.path.splitext( filename )
        # tag the default filename with the panel type, if not already tagged
        if self.__class__.__name__ == 'TrajectoryPlotPanel':
            if '_trajplot' not in basename:
                basename += '_trajplot'
        elif self.__class__.__name__ == 'PosHistPanel':
            if '_poshist' not in basename:
                basename += '_poshist'
        elif self.__class__.__name__ == 'VelocityPlotPanel':
            if '_velocityplot' not in basename:
                basename += '_velocityplot'
        elif self.__class__.__name__ == 'VelPlotPanel':
            if '_velhist' not in basename:
                basename += '_velhist'
        elif self.__class__.__name__ == 'TurnPlotPanel':
            if '_turnhist' not in basename:
                basename += '_turnhist'
        elif self.__class__.__name__ == 'SpacePlotPanel':
            if '_spaceplot' not in basename:
                basename += '_spaceplot'
        elif self.__class__.__name__ == 'OrnPlotPanel':
            if '_ornplot' not in basename:
                basename += '_ornplot'
        # show dialog
        dlg = wx.FileDialog( self.canvas, "Save as image...", dirname, basename, wildcard=dialog_str, style=wx.SAVE )
        if dlg.ShowModal() == wx.ID_OK:
            # get entered filename and extension
            const.filepath = dlg.GetPath()
            selected_ext = ext_list[dlg.GetFilterIndex()]
            if not const.filepath.endswith( selected_ext ):
                const.filepath += selected_ext
            print "saving to", const.filepath
            try:
                self.figure.savefig( const.filepath )
            except IOError:
                wx.MessageBox( "Miscellaneous error while saving.",
                               "Write Error", wx.ICON_ERROR|wx.OK )
                # everybody loves a "miscellaneous error" message
        dlg.Destroy()

    def SetColor(self, rgbtuple):
        """Set figure and canvas colours to be the same."""
        if not rgbtuple:
            # default to the system button-face colour
            rgbtuple = wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNFACE).Get()
        # matplotlib wants 0-1 floats; wx wants 0-255 ints
        col = [c/255.0 for c in rgbtuple]
        self.figure.set_facecolor(col)
        self.figure.set_edgecolor(col)
        self.canvas.SetBackgroundColour(wx.Colour(*rgbtuple))

    def _onSize(self, event):
        # defer the actual resize work to idle time
        self._resizeflag = True

    def _onIdle(self, evt):
        # perform at most one resize+redraw per batch of size events
        if self._resizeflag:
            self._resizeflag = False
            self._SetSize()
        self.draw()

    def _SetSize(self, pixels=None):
        """This method can be called to force the Plot to be a desired size,
        which defaults to the ClientSize of the panel."""
        if not pixels:
            pixels = self.GetClientSize()
        self.canvas.SetSize(pixels)
        # keep the figure's inch size in sync with the pixel size
        self.figure.set_size_inches(pixels[0]/self.figure.get_dpi(),
                                    pixels[1]/self.figure.get_dpi())

    def draw( self ):
        """Drawing events handled here. Create subplots, label axes, plot.

        Subclasses override this; the base implementation draws nothing."""
        pass

    def post_draw( self, evt ):
        """Post-drawing events handled here. Rescale plots to fit in panel."""
        # NOTE(review): the `and False` makes this branch dead code, so the
        # manual margin adjustment below always runs even when tight_layout
        # is available
        if HAS_TIGHTLAYOUT and False:
            plt.tight_layout()
        else:
            # http://matplotlib.sourceforge.net/faq/howto_faq.html#automatically-make-room-for-tick-labels
            # http://stackoverflow.com/questions/4018860/text-box-in-matplotlib
            bboxes = []
            # Cycle through all artists in all the axes in the figure
            for ax in self.figure.axes:
                for artist in ax.get_children():
                    # If it's a text artist, grab its bounding box...
                    if isinstance(artist, matplotlib.text.Text):
                        bbox = artist.get_window_extent()
                        # the figure transform goes from relative->pixels and
                        # we want the inverse of that
                        bboxi = bbox.inverse_transformed(self.figure.transFigure)
                        bboxes.append(bboxi)
                # cycle through all tick labels, too
                for label in ax.get_xticklabels() + ax.get_yticklabels():
                    bbox = label.get_window_extent()
                    bboxi = bbox.inverse_transformed(self.figure.transFigure)
                    bboxes.append(bboxi)
            # this is the bbox that bounds all the bboxes, again in
            # relative figure coords
            if len( bboxes ) == 0: return
            size = mtransforms.Bbox.union(bboxes)
            margins = self.figure.subplotpars
            # grow or shrink the vertical margins so the text fits
            vspace = size.height - (margins.top - margins.bottom)
            if size.height > 0.8:
                self.figure.subplots_adjust( bottom=margins.bottom + vspace/2.,
                                             top=margins.top - vspace/2. )
            elif size.height < 0.5:
                self.figure.subplots_adjust( bottom=margins.bottom - vspace/2.,
                                             top=margins.top + vspace/2. )
            # and likewise for the horizontal margins
            hspace = size.width - (margins.right - margins.left)
            if size.width > 0.8:
                self.figure.subplots_adjust( left=margins.left + hspace/2.,
                                             right=margins.right - hspace/2. )
            elif size.width < 0.5:
                self.figure.subplots_adjust( left=margins.left - hspace/2.,
                                             right=margins.right + hspace/2. )
            # Temporarily disconnect any callbacks to the draw event...
            # (To avoid recursion)
            func_handles = self.figure.canvas.callbacks.callbacks[evt.name]
            self.figure.canvas.callbacks.callbacks[evt.name] = {}
            # Re-draw the figure..
            self.figure.canvas.draw()
            # Reset the draw event callbacks
            self.figure.canvas.callbacks.callbacks[evt.name] = func_handles
#######################################################################
# TrajectoryPlotPanel
#######################################################################
class TrajectoryPlotPanel (PlotPanel):
    """Line plot of every fly's (x, y) position trajectory."""

    def __init__( self, parent, data ):
        """Initialize a trajectory plot panel with data.

        @parent: parent wx window
        @data: sequence of per-frame dicts mapping fly id -> ellipse
        """
        PlotPanel.__init__( self, parent )
        n_frames = len( data )
        n_flies = max( len( data[0] ), params.nids )
        # one column per fly; absent flies stay at (0, 0)
        self.x = num.zeros( (n_frames, n_flies), dtype=num.float32 )
        self.y = num.zeros( self.x.shape, dtype=num.float32 )
        for fi, frame in enumerate( data ):
            for ei, ellipse in frame.iteritems():
                # ignore fly ids beyond the allocated columns
                if ei >= n_flies:
                    break
                self.x[fi, ei] = ellipse.center.x
                self.y[fi, ei] = ellipse.center.y

    def draw( self ):
        """Create the subplot on first use, then plot all trajectories."""
        if not hasattr( self, 'subplot' ):
            self.subplot = self.figure.add_subplot( 111, aspect='equal' )
        self.subplot.set_title( "Positions in first %d frames" % self.x.shape[0], fontsize=12 )
        self.subplot.set_xlabel( "x (pixels)" )
        self.subplot.set_ylabel( "y (pixels)" )
        self.subplot.plot( self.x, self.y )
#######################################################################
# VelocityPlotPanel
#######################################################################
class VelocityPlotPanel (PlotPanel):
    """Plot of per-frame velocity: mean over flies, +/- one S.D."""

    def __init__( self, parent, data ):
        """Initialize a velocity plot panel with data.

        @parent: parent wx window
        @data: sequence of per-frame containers of fly positions
        """
        PlotPanel.__init__( self, parent )
        # x: frame index; y: per-fly displacement between consecutive frames
        self.x = num.zeros( (len( data ) - 1,), dtype=num.float32 )
        self.y = num.zeros( (len( data ) - 1,max( len( data[0] ), params.nids )), dtype=num.float32 )
        for fi in range( len( data ) - 1 ):
            self.x[fi] = fi
            for ei in range( self.y.shape[1] ):
                # a fly contributes only if it is present in both frames
                if data[fi].hasItem( ei ) and data[fi + 1].hasItem( ei ):
                    pos = data[fi][ei]
                    next_pos = data[fi + 1][ei]
                    self.y[fi,ei] = pos.Euc_dist( next_pos )
        try:
            # summary statistics across flies, per frame
            self.y_mn = num.mean( self.y, 1 )
            self.y_st = num.std( self.y, 1 )
        except FloatingPointError:
            # degenerate data; fall back to something plottable
            self.y_mn = self.x
            self.y_st = self.x
        #self.y_env = num.hstack( (self.y_mn + self.y_st,
        #                          self.y_mn[::-1] - self.y_st[::-1]) )
        #self.x_env = num.hstack( (self.x, self.x[::-1]) )

    def draw( self ):
        """Draw the data."""
        if not hasattr( self, 'subplot' ):
            self.subplot = self.figure.add_subplot( 111 )
        self.subplot.set_title( "Velocities in first %d frames"%self.x.shape[0], fontsize=12 )
        self.subplot.set_xlabel( "frame" )
        self.subplot.set_ylabel( "velocity (pixels/frame)\nmean +/- S.D." )
        # mean as a black line, +/- one S.D. as a shaded band
        self.subplot.plot( self.x, self.y_mn, 'k' )
        self.subplot.fill_between( self.x, self.y_mn + self.y_st, self.y_mn - self.y_st, alpha=0.5, edgecolor='none', facecolor='k' )
        # overlay the individual per-fly traces only when there are few flies
        if self.y.shape[1] < 10:
            try:
                self.subplot.plot( self.x, self.y )
            except ZeroDivisionError:
                pass
#######################################################################
# PosHistPanel
#######################################################################
class PosHistPanel( PlotPanel ):
###################################################################
# __init__()
###################################################################
def __init__( self, frame, data, width, height ):
    """Accumulate a 2-D histogram of fly positions over all frames.

    @frame: parent wx window (NOTE(review): this parameter is shadowed
        by the loop variable below; consider renaming one of them)
    @data: sequence of per-frame containers of fly ellipses
    @width, @height: arena extent in pixels
    """
    PlotPanel.__init__( self, frame )
    self.data = data
    # fill grid
    # one bin per pos_binsize x pos_binsize pixel square
    # (relies on Python 2 integer division for int width/height -- TODO confirm)
    self.grid = num.zeros( (width/const.pos_binsize+1, height/const.pos_binsize+1) )
    for frame in data:
        for fly in frame.itervalues():
            fly_x = fly.center.x
            fly_y = fly.center.y
            try:
                self.grid[int(fly_x/const.pos_binsize),int(fly_y/const.pos_binsize)] += 1
            except IndexError:
                print "error adding", fly_x, fly_y, "to bin", fly_x/const.pos_binsize, fly_y/const.pos_binsize, "(size is", self.grid.shape, "\b)"
    # increment grid
    # for each fly
    # for each frame
    # tick marks roughly every 200 pixels, labeled in pixel units
    self.xticks = range( 0, self.grid.shape[0], max( 200/const.pos_binsize, 1 ) )
    self.xticklabels = []
    for xx in self.xticks:
        self.xticklabels.append( str(xx*const.pos_binsize) )
    self.yticks = range( 0, self.grid.shape[1], max( 200/const.pos_binsize, 1 ) )
    self.yticklabels = []
    for yy in self.yticks:
        self.yticklabels.append( str(yy*const.pos_binsize) )
###################################################################
# draw()
###################################################################
def draw( self ):
first = False
if not hasattr(self, 'subplot'):
self.subplot = self.figure.add_subplot( 111, aspect='equal' )
self.subplot.set_title("Position histogram for first %d frames"%len( self.data ), fontsize = 12)
self.subplot.set_xlabel( | |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import csv
import json
import os.path as osp
import warnings
from collections import OrderedDict, defaultdict
import mmcv
import numpy as np
import torch.distributed as dist
from mmcv.runner import get_dist_info
from mmcv.utils import print_log
from mmdet.core import eval_map
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class OpenImagesDataset(CustomDataset):
"""Open Images dataset for detection.
Args:
ann_file (str): Annotation file path.
label_file (str): File path of the label description file that
maps the classes names in MID format to their short
descriptions.
image_level_ann_file (str): Image level annotation, which is used
in evaluation.
get_supercategory (bool): Whether to get parent class of the
current class. Default: True.
hierarchy_file (str): The file path of the class hierarchy.
Default: None.
get_metas (bool): Whether to get image metas in testing or
validation time. This should be `True` during evaluation.
Default: True. The OpenImages annotations do not have image
metas (width and height of the image), which will be used
during evaluation. We provide two ways to get image metas
in `OpenImagesDataset`:
- 1. `load from file`: Load image metas from pkl file, which
is suggested to use. We provided a script to get image metas:
`tools/misc/get_image_metas.py`, which need to run
this script before training/testing. Please refer to
`config/openimages/README.md` for more details.
- 2. `load from pipeline`, which will get image metas during
test time. However, this may reduce the inference speed,
especially when using distribution.
load_from_file (bool): Whether to get image metas from pkl file.
meta_file (str): File path to get image metas.
filter_labels (bool): Whether filter unannotated classes.
Default: True.
load_image_level_labels (bool): Whether load and consider image
level labels during evaluation. Default: True.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
"""
def __init__(self,
             ann_file,
             label_file='',
             image_level_ann_file='',
             get_supercategory=True,
             hierarchy_file=None,
             get_metas=True,
             load_from_file=True,
             meta_file='',
             filter_labels=True,
             load_image_level_labels=True,
             file_client_args=dict(backend='disk'),
             **kwargs):
    """Initialize the dataset; see the class docstring for argument details."""
    # may get error if use other file_client
    self.file_client_args = file_client_args
    # MID class id -> short description; missing keys default to ''
    self.cat2label = defaultdict(str)
    self.index_dict = {}
    # Although it will init file_client in `CustomDataset`,
    # it needs to be init here.
    file_client = mmcv.FileClient(**file_client_args)
    # need get `index_dict` before load annotations
    assert label_file.endswith('csv')
    if hasattr(file_client, 'get_local_path'):
        with file_client.get_local_path(label_file) as local_path:
            class_names = self.get_classes_from_csv(local_path)
    else:
        class_names = self.get_classes_from_csv(label_file)
    # CustomDataset.__init__ triggers load_annotations(ann_file)
    super(OpenImagesDataset, self).__init__(
        ann_file=ann_file, file_client_args=file_client_args, **kwargs)
    self.CLASSES = class_names
    self.image_level_ann_file = image_level_ann_file
    self.load_image_level_labels = load_image_level_labels
    if get_supercategory is True:
        assert hierarchy_file is not None
        # the hierarchy file format differs by dataset flavor
        if self.__class__.__name__ == 'OpenImagesDataset':
            assert hierarchy_file.endswith('json')
        elif self.__class__.__name__ == 'OpenImagesChallengeDataset':
            assert hierarchy_file.endswith('np')
        else:
            raise NotImplementedError
        if hasattr(self.file_client, 'get_local_path'):
            with self.file_client.get_local_path(
                    hierarchy_file) as local_path:
                self.class_label_tree = self.get_relation_matrix(
                    local_path)
        else:
            self.class_label_tree = self.get_relation_matrix(
                hierarchy_file)
    self.get_supercategory = get_supercategory
    self.get_metas = get_metas
    self.load_from_file = load_from_file
    self.meta_file = meta_file
    if self.data_root is not None:
        if not osp.isabs(self.meta_file):
            self.meta_file = osp.join(self.data_root, self.meta_file)
    self.filter_labels = filter_labels
    self.rank, self.world_size = get_dist_info()
    # scratch space for collecting image metas at test time
    self.temp_img_metas = []
    self.test_img_metas = []
    self.test_img_shapes = []
    self.load_from_pipeline = False if load_from_file else True
def get_classes_from_csv(self, label_file):
    """Get classes name from file.

    Also populates ``self.cat2label`` (MID -> short name) and
    ``self.index_dict`` (MID -> contiguous label index).

    Args:
        label_file (str): File path of the label description file that
            maps the classes names in MID format to their short
            descriptions (one ``mid,name`` row per class).

    Returns:
        list[str]: Class name of OpenImages.
    """
    mids = []
    names = []
    with open(label_file, 'r') as f:
        for row in csv.reader(f):
            mid, name = row[0], row[1]
            self.cat2label[mid] = name
            names.append(name)
            mids.append(mid)
    # label index is the position of the MID in file order
    self.index_dict = {mid: i for i, mid in enumerate(mids)}
    return names
def load_annotations(self, ann_file):
    """Load annotation from annotation file.
    Special described `self.ann_infos` (defaultdict[list[dict]])
    in this function: Annotations where item of the defaultdict
    indicates an image, each of which has (n) dicts. Keys of dicts are:
        - `bbox` (list): coordinates of the box, in normalized image
          coordinates, of shape 4.
        - `label` (int): the label id.
        - `is_group_of` (bool): Indicates that the box spans a group
          of objects (e.g., a bed of flowers or a crowd of people).
        - `is_occluded` (bool): Indicates that the object is occluded
          by another object in the image.
        - `is_truncated` (bool): Indicates that the object extends
          beyond the boundary of the image.
        - `is_depiction` (bool): Indicates that the object is a
          depiction.
        - `is_inside` (bool): Indicates a picture taken from the
          inside of the object.
    Args:
        ann_file (str): CSV style annotation file path.
    Returns:
        list[dict]: Data infos where each item of the list
        indicates an image. Keys of annotations are:
            - `img_id` (str): Image name.
            - `filename` (str): Image name with suffix.
    """
    self.ann_infos = defaultdict(list)
    data_infos = []
    # previous row's file name; rows for the same image are assumed
    # contiguous, so a change marks the start of a new image
    cp_filename = None
    with open(ann_file, 'r') as f:
        reader = csv.reader(f)
        for i, line in enumerate(reader):
            # skip the CSV header row
            if i == 0:
                continue
            img_id = line[0]
            filename = f'{img_id}.jpg'
            # column 2 holds the MID class id
            label_id = line[2]
            assert label_id in self.index_dict
            label = int(self.index_dict[label_id])
            bbox = [
                float(line[4]),  # xmin
                float(line[6]),  # ymin
                float(line[5]),  # xmax
                float(line[7])  # ymax
            ]
            # columns 8-12 are 0/1 attribute flags
            is_occluded = True if int(line[8]) == 1 else False
            is_truncated = True if int(line[9]) == 1 else False
            is_group_of = True if int(line[10]) == 1 else False
            is_depiction = True if int(line[11]) == 1 else False
            is_inside = True if int(line[12]) == 1 else False
            self.ann_infos[img_id].append(
                dict(
                    bbox=bbox,
                    label=label,
                    is_occluded=is_occluded,
                    is_truncated=is_truncated,
                    is_group_of=is_group_of,
                    is_depiction=is_depiction,
                    is_inside=is_inside))
            if filename != cp_filename:
                data_infos.append(dict(img_id=img_id, filename=filename))
                cp_filename = filename
    return data_infos
def get_ann_info(self, idx):
    """Get OpenImages annotation by index.

    Args:
        idx (int): Index of data.

    Returns:
        dict: Annotation info of specified index, with keys ``bboxes``,
            ``labels``, ``bboxes_ignore``, ``labels_ignore``,
            ``gt_is_group_ofs`` and the per-box attribute flag arrays.
    """
    img_id = self.data_infos[idx]['img_id']
    bboxes = []
    labels = []
    bboxes_ignore = []
    labels_ignore = []
    is_occludeds = []
    is_truncateds = []
    is_group_ofs = []
    is_depictions = []
    is_insides = []
    for obj in self.ann_infos[img_id]:
        label = int(obj['label'])
        bbox = [
            float(obj['bbox'][0]),
            float(obj['bbox'][1]),
            float(obj['bbox'][2]),
            float(obj['bbox'][3])
        ]
        bboxes.append(bbox)
        labels.append(label)
        # Other parameters
        is_occludeds.append(obj['is_occluded'])
        is_truncateds.append(obj['is_truncated'])
        is_group_ofs.append(obj['is_group_of'])
        is_depictions.append(obj['is_depiction'])
        is_insides.append(obj['is_inside'])
    if not bboxes:
        bboxes = np.zeros((0, 4))
        labels = np.zeros((0, ))
    else:
        bboxes = np.array(bboxes)
        labels = np.array(labels)
    if not bboxes_ignore:
        bboxes_ignore = np.zeros((0, 4))
        labels_ignore = np.zeros((0, ))
    else:
        bboxes_ignore = np.array(bboxes_ignore)
        labels_ignore = np.array(labels_ignore)
    assert len(is_group_ofs) == len(labels) == len(bboxes)
    # Fix: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `bool` is the documented replacement for dtype use.
    gt_is_group_ofs = np.array(is_group_ofs, dtype=bool)
    # These parameters is not used yet.
    is_occludeds = np.array(is_occludeds, dtype=bool)
    is_truncateds = np.array(is_truncateds, dtype=bool)
    is_depictions = np.array(is_depictions, dtype=bool)
    is_insides = np.array(is_insides, dtype=bool)
    ann = dict(
        bboxes=bboxes.astype(np.float32),
        labels=labels.astype(np.int64),
        bboxes_ignore=bboxes_ignore.astype(np.float32),
        labels_ignore=labels_ignore.astype(np.int64),
        gt_is_group_ofs=gt_is_group_ofs,
        is_occludeds=is_occludeds,
        is_truncateds=is_truncateds,
        is_depictions=is_depictions,
        is_insides=is_insides)
    return ann
def get_meta_from_file(self, meta_file=''):
    """Get image metas from pkl file.

    Appends each image's original (height, width) to
    ``self.test_img_shapes``, in ``data_infos`` order.

    Args:
        meta_file (str): Path to the pkl file produced by
            `tools/misc/get_image_metas.py`.
    """
    metas = mmcv.load(
        meta_file,
        file_format='pkl',
        file_client_args=self.file_client_args)
    assert len(metas) == len(self)
    for i in range(len(metas)):
        # the saved metas must line up one-to-one with data_infos
        file_name = osp.split(metas[i]['filename'])[-1]
        img_info = self.data_infos[i].get('img_info', None)
        if img_info is not None:
            assert file_name == osp.split(img_info['filename'])[-1]
        else:
            assert file_name == self.data_infos[i]['filename']
        # keep only (height, width)
        hw = metas[i]['ori_shape'][:2]
        self.test_img_shapes.append(hw)
def get_meta_from_pipeline(self, results):
    """Get image metas from pipeline.

    Accumulates the metas seen so far in ``self.temp_img_metas`` and
    publishes them to ``self.test_img_metas``.

    Args:
        results (dict): Pipeline output containing an ``img_metas`` entry.
    """
    self.temp_img_metas.extend(results['img_metas'])
    if dist.is_available() and self.world_size > 1:
        # distributed run: gather metas from every rank so the final
        # list covers the whole dataset
        from mmdet.apis.test import collect_results_cpu
        self.test_img_metas = collect_results_cpu(self.temp_img_metas,
                                                  len(self))
    else:
        self.test_img_metas = self.temp_img_metas
def get_img_shape(self, metas):
    """Set images original shape into data_infos.

    Args:
        metas (list): Collected image metas (DataContainer-like objects
            exposing a ``.data`` dict), one per image in dataset order.
    """
    assert len(metas) == len(self)
    for i, meta in enumerate(metas):
        # sanity check: metas must line up one-to-one with data_infos
        file_name = osp.split(meta.data['ori_filename'])[-1]
        info = self.data_infos[i]
        img_info = info.get('img_info', None)
        if img_info is None:
            expected = info['filename']
        else:
            expected = osp.split(img_info['filename'])[-1]
        assert file_name == expected
        # keep only the original (height, width)
        self.test_img_shapes.append(meta.data['ori_shape'][:2])
def prepare_test_img(self, idx):
    """Get testing data after pipeline.

    Args:
        idx (int): Index of the image.

    Returns:
        dict: Pipeline output for image ``idx``.
    """
    results = dict(img_info=self.data_infos[idx])
    if self.proposals is not None:
        results['proposals'] = self.proposals[idx]
    self.pre_pipeline(results)
    processed = self.pipeline(results)
    # optionally harvest image metas on the fly
    # (see `get_meta_from_pipeline`)
    if self.get_metas and self.load_from_pipeline:
        self.get_meta_from_pipeline(processed)
    return processed
def _filter_imgs(self, min_size=32):
"""Filter images too small."""
if self.filter_empty_gt:
warnings.warn('OpenImageDatasets does not support '
'filtering empty gt images.')
valid_inds = [i for i in range(len(self))]
return valid_inds
    def _set_group_flag(self):
        """Set flag according to image aspect ratio.

        Aspect-ratio grouping is not implemented here: every image gets
        flag 0, i.e. a single group.
        """
        self.flag = np.zeros(len(self), dtype=np.uint8)
        # TODO: set flag without width and height
    def get_relation_matrix(self, hierarchy_file):
        """Get hierarchy for classes.

        Args:
            hierarchy_file (str): File path to the JSON hierarchy for
                classes; resolved against ``self.data_root`` when relative.

        Returns:
            ndarray: The matrix of the corresponding relationship between
                the parent class and the child class, of shape
                (class_num, class_num).
        """
        if self.data_root is not None:
            if not osp.isabs(hierarchy_file):
                hierarchy_file = osp.join(self.data_root, hierarchy_file)
        with open(hierarchy_file, 'r') as f:
            hierarchy = json.load(f)
        class_num = len(self.CLASSES)
        # Start from the identity: every class is its own ancestor.
        class_label_tree = np.eye(class_num, class_num)
        class_label_tree = self._convert_hierarchy_tree(
            hierarchy, class_label_tree)
        return class_label_tree
def _convert_hierarchy_tree(self,
hierarchy_map,
class_label_tree,
parents=[],
get_all_parents=True):
"""Get matrix of the corresponding relationship between the parent
class and the child class.
Args:
hierarchy_map (dict): Including label name and corresponding
subcategory. Keys of dicts are:
- `LabeName` (str): Name of the label.
| |
# -*- coding: utf-8 -*-
import collections
import inspect
import time
import random
import selenium
from selenium.webdriver import Ie, Opera, Chrome, Firefox, PhantomJS
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import (NoSuchElementException, TimeoutException,
WebDriverException, ElementNotVisibleException,
NoAlertPresentException)
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
def create(drivername, *args, **kwargs):
    """Create a local WebDriver by name and wrap it.

    Args:
        drivername: 'ie', 'opera', 'chrome', 'firefox' or 'phantomjs'
            (case-insensitive).
        *args, **kwargs: Forwarded to the driver constructor.

    Returns:
        SeleniumWrapper around the new driver.

    Raises:
        TypeError: If drivername is not a string.
        ValueError: If drivername names an unsupported browser.
    """
    if not isinstance(drivername, str):
        msg = "drivername should be an instance of string. given {0}".format(type(drivername))
        raise TypeError(msg)
    drivers = {'ie': Ie,
               'opera': Opera,
               'chrome': Chrome,
               'firefox': Firefox,
               'phantomjs': PhantomJS}
    dname = drivername.lower()
    if dname not in drivers:
        msg = "".join(("drivername should be one of [IE, Opera, Chrome, Firefox, PhantomJS]",
                       "(case-insentive). given {0}".format(drivername)))
        raise ValueError(msg)
    # The old 'except Exception as e: raise e' added nothing; let
    # constructor errors propagate naturally.
    return SeleniumWrapper(drivers[dname](*args, **kwargs))
def connect(drivername, executor, custom_capabilities=None, **kwargs):
    """Connect to a remote WebDriver and wrap it.

    Args:
        drivername: Browser name (case-insensitive), one of ie/opera/
            chrome/firefox/android/phantomjs.
        executor: Remote command-executor URL.
        custom_capabilities: Optional dict of capability overrides.
        **kwargs: Forwarded to ``selenium.webdriver.Remote``.

    Returns:
        SeleniumWrapper around the remote driver.

    Raises:
        TypeError: On wrongly typed arguments.
        ValueError: On an unsupported browser name.
    """
    if not isinstance(drivername, str):
        msg = "drivername should be an instance of string. given {0}".format(type(drivername))
        raise TypeError(msg)
    if not isinstance(executor, str):
        msg = "executor should be an instance of string. given {0}".format(type(executor))
        raise TypeError(msg)
    if custom_capabilities and not isinstance(custom_capabilities, dict):
        msg = "custom_capabilities should be an instance of dict. given {0}".format(type(custom_capabilities))
        raise TypeError(msg)
    capabilities = {'ie': DesiredCapabilities.INTERNETEXPLORER,
                    'opera': DesiredCapabilities.OPERA,
                    'chrome': DesiredCapabilities.CHROME,
                    'firefox': DesiredCapabilities.FIREFOX,
                    'android': DesiredCapabilities.ANDROID,
                    # BUG FIX: key was misspelled 'phatomjs', which made
                    # PhantomJS unreachable through this function.
                    'phantomjs': DesiredCapabilities.PHANTOMJS}
    dname = drivername.lower()
    if dname not in capabilities:
        msg = "".join(("drivername should be one of [IE, Opera, Chrome, Firefox, PhantomJS]",
                       "(case-insentive). given {0}".format(drivername)))
        raise ValueError(msg)
    # Copy first: the old code wrote overrides straight into the shared
    # DesiredCapabilities class dicts, leaking into later connections.
    capability = dict(capabilities[dname])
    capability.update(custom_capabilities or {})
    driver = selenium.webdriver.Remote(executor, capability, **kwargs)
    return SeleniumWrapper(driver)
def _is_wrappable(obj):
    """Return True iff *obj* is a raw WebDriver or WebElement."""
    return isinstance(obj, (WebDriver, WebElement))
def _chainreact(__getattr__):
    # Decorator for SeleniumWrapper.__getattr__: any WebDriver/WebElement
    # produced by delegated attribute access gets re-wrapped, so chained
    # calls keep returning SeleniumWrapper objects.
    def containment(*methodname):
        def wrap_or_else(obj):
            # Wrap raw selenium objects; pass everything else through.
            if _is_wrappable(obj):
                return SeleniumWrapper(obj)
            else:
                return obj
        # The decorated __getattr__ returns (owner, attribute value).
        self, methodobj = __getattr__(*methodname)
        if inspect.isroutine(methodobj):
            # Defer the call so the eventual return value is wrapped too.
            def reaction(*realargs):
                result = methodobj(*realargs)
                # for side-effective method(append, ...)
                return wrap_or_else(result)
            return reaction
        else:
            return wrap_or_else(methodobj)
    return containment
class Performance(object):
def __init__(self, performance):
if not isinstance(performance, dict):
raise AttributeError('Wrapped performance object should be an instance of dict.')
self._performance = performance
@property
def memory(self):
if 'memory' in self._performance:
return Memory(self._performance['memory'])
raise AttributeError('window.performance.memory is not supported in this browser.')
@property
def navigation(self):
if 'navigation' in self._performance:
return Navigation(self._performance['navigation'])
raise AttributeError('window.performance.navigation is not supported in this browser.')
@property
def timing(self):
if 'timing' in self._performance:
return Timing(self._performance['timing'])
raise AttributeError('window.performance.timing is not supported in this browser.')
class Memory(object):
    """Attribute-style, iterable proxy over a ``performance.memory`` dict."""

    def __init__(self, memory):
        self._data = memory

    def __getattr__(self, name):
        # Unknown attributes resolve against the wrapped dict.
        return self._data[name]

    @property
    def __dict__(self):
        # vars(obj) yields the wrapped dict itself.
        return self._data

    def __iter__(self):
        return iter(self._data)
class Navigation(object):
    """Attribute-style, iterable proxy over a ``performance.navigation`` dict."""

    def __init__(self, navigation):
        self._data = navigation

    def __getattr__(self, name):
        # Unknown attributes resolve against the wrapped dict.
        return self._data[name]

    @property
    def __dict__(self):
        # vars(obj) yields the wrapped dict itself.
        return self._data

    def __iter__(self):
        return iter(self._data)
class Timing(object):
    """Attribute-style, iterable proxy over a ``performance.timing`` dict."""

    def __init__(self, timing):
        self._data = timing

    def __getattr__(self, name):
        # Unknown attributes resolve against the wrapped dict.
        return self._data[name]

    @property
    def __dict__(self):
        # vars(obj) yields the wrapped dict itself.
        return self._data

    def __iter__(self):
        return iter(self._data)
class SeleniumWrapper(object):
def __init__(self, driver, timeout=5, silent=False):
if _is_wrappable(driver):
self._wrapped = driver
self._timeout = timeout
self._silent = silent
else:
msg = "2nd argument should be an instance of WebDriver or WebElement. given {0}.".format(type(driver))
raise TypeError(msg)
    @property
    def unwrap(self):
        """The raw selenium object behind this wrapper."""
        return self._wrapped
@property
def performance(self):
if isinstance(self._wrapped, WebDriver):
if self._wrapped.execute_script("return 'performance' in window;"):
timeout = time.time() + 120
script = "return window.performance.timing.loadEventEnd;"
executor = self._wrapped.execute_script
while time.time() < timeout and not executor(script):
time.sleep(0.2)
if not executor(script):
raise Exception('Timeout!')
performance = self._wrapped.execute_script('return window.performance;')
return Performance(performance)
else:
raise AttributeError("This browser is not supporting Timing APIs.")
else:
raise AttributeError("'WebElement' object has not attribute 'performance'")
@property
def parent(self):
if isinstance(self._wrapped, WebElement):
return self.xpath("./parent::node()", timeout=self._timeout)
else:
raise AttributeError("'WebDriver' object has no attribute 'parent'")
@property
def to_select(self):
if self._is_selectable():
return Select(self.unwrap)
raise TypeError("Must be 'select' element.")
@property
def alert(self):
timeout = time.time() + self._timeout
while time.time() < timeout:
try:
alert = self._wrapped.switch_to_alert()
alert.text
return alert
except NoAlertPresentException:
time.sleep(0.2)
msg = "Wait for alert to be displayed for 2 seconds, but it was not displayed."
raise NoAlertPresentException(msg)
def _settimeout(self, timeout):
if isinstance(timeout, (int, float)):
self._timeout = timeout
else:
raise AttributeError
    def _gettimeout(self):
        """Getter for the default wait timeout (seconds)."""
        return self._timeout
timeout = property(_gettimeout, _settimeout)
    @property
    def silent(self):
        """If True, waitfor() returns None on timeout instead of raising."""
        return self._silent
@silent.setter
def silent(self, true_of_false):
if not isinstance(true_of_false, bool):
raise AttributeError
self._silent = true_of_false
    def __getattribute__(self, name):
        # Functionally a no-op override (identical to the default lookup);
        # presumably kept to make explicit that wrapper attributes win
        # before __getattr__ delegates to the wrapped object — confirm.
        return object.__getattribute__(self, name)
    @_chainreact
    def __getattr__(self, name):
        # Delegate unknown attributes to the wrapped selenium object;
        # _chainreact re-wraps any WebDriver/WebElement results.
        return self._wrapped, getattr(self._wrapped, name)
    def _is_selectable(self):
        """Return True iff the wrapped element is a <select>."""
        return self.unwrap.tag_name == 'select'
def _is_stopping(self, interval):
before = (self._wrapped.location['x'], self._wrapped.location['y'])
time.sleep(interval)
after = (self._wrapped.location['x'], self._wrapped.location['y'])
return before[0] == after[0] and before[1] == after[1]
def _wait_until_stopping(self, timeout, interval):
timeout = time.time() + timeout
while time.time() < timeout:
if self._is_stopping(interval):
return True
else:
time.sleep(interval)
if not self._is_stopping(interval):
raise WebDriverException("Element was not stably displayed for {sec} seconds.".format(sec=timeout))
    def _wait_until_clickable(self, timeout, interval):
        """Retry clicking until the click lands or *timeout* seconds pass.

        Selenium raises WebDriverException while another element would
        receive the click; each failure message is collected for the final
        error report.
        """
        err_messages = []
        endtime = time.time() + timeout
        while True:
            try:
                self._wrapped.click()
                break
            except WebDriverException as e:
                # Keep the tail of selenium's message for the report.
                err_messages.append(e.msg.split(":")[-1].strip())
                time.sleep(interval)
                if (time.time() > endtime):
                    if err_messages:
                        template = ("Waited for element to be clickable for {sec} seconds, ",
                                    "but clicked other elements. {err}")
                        msg = "".join(template).format(sec=timeout, err=err_messages)
                        raise WebDriverException(msg)
    def _wait_until_displayed(self, timeout, interval):
        """Wait until the wrapped element reports is_displayed().

        Raises:
            ElementNotVisibleException: On timeout, with a dump of the
                element's CSS visibility and geometry for debugging.
        """
        try:
            WebDriverWait(self._wrapped, timeout, interval).until(lambda d: d.is_displayed())
        except TimeoutException:
            template = ("Waited for element to be displayed for {sec} seconds, ",
                        "but <{target} ...> was not displayed:: <{dumped}>")
            msg = "".join(template).format(sec=timeout, target=self._wrapped.tag_name, dumped=self._dump())
            raise ElementNotVisibleException(msg)
def _dump(self):
element = self._wrapped
info = {"visibility": element.value_of_css_property("visibility"),
"display": element.value_of_css_property("display"),
"height": element.value_of_css_property("height"),
"width": element.value_of_css_property("width"),
"x": element.location["x"],
"y": element.location["y"]}
dumped = " ".join(["{k}:{v}".format(k=k, v=info[k]) for k in info])
return dumped
def attr(self, name):
if isinstance(self._wrapped, WebElement):
return self._wrapped.get_attribute(name)
else:
raise AttributeError("This is WebDriver wrapped object.")
def click(self, timeout=None, presleep=0, postsleep=0):
timeout = timeout or self._timeout
if isinstance(self._wrapped, WebElement):
try:
if presleep:
time.sleep(presleep)
self._wait_until_stopping(timeout, 0.01)
self._wait_until_displayed(timeout, 0.01)
self._wait_until_clickable(timeout, 0.01)
if postsleep:
time.sleep(postsleep)
except Exception as e:
raise e
def scroll_to(self, x, y):
if isinstance(self._wrapped, WebDriver):
return self._wrapped.execute_script("window.scrollTo({:d}, {:d})".format(x, y))
else:
raise AttributeError("This is WebElement wrapped object.")
def scroll_by(self, x, y):
if isinstance(self._wrapped, WebDriver):
return self._wrapped.execute_script("window.scrollBy({:d}, {:d})".format(x, y))
else:
raise AttributeError("This is WebElement wrapped object.")
    def scroll_into_view(self, jq_identifier, align_with_top=True):
        """Scroll the element matching a jQuery selector into view (driver only).

        Requires jQuery ($) to be loaded in the page. Selector errors are
        swallowed by the in-page try/catch.

        Raises:
            AttributeError: When jQuery is missing or when called on a
                WebElement wrapper.
        """
        if isinstance(self._wrapped, WebDriver):
            if self._wrapped.execute_script("try{return $;}catch(e){}"):
                script_template = "try{{$('{0}') && $('{0}')[0].scrollIntoView({1})}}catch(e){{}}"
                script = script_template.format(jq_identifier, 'true' if align_with_top else 'false')
                self._wrapped.execute_script(script)
            else:
                raise AttributeError("You must load jquery library.")
        else:
            raise AttributeError("This is WebElement wrapped object.")
def waitfor(self, type, target, eager=False, timeout=None):
timeout = timeout or self._timeout
if eager:
types = {"id": lambda d: d.find_elements_by_id(target),
"name": lambda d: d.find_elements_by_name(target),
"xpath": lambda d: d.find_elements_by_xpath(target),
"link_text": lambda d: d.find_elements_by_link_text(target),
"partial_link_text": lambda d: d.find_elements_by_partial_link_text(target),
"tag": lambda d: d.find_elements_by_tag_name(target),
"class": lambda d: d.find_elements_by_class_name(target),
"css": lambda d: d.find_elements_by_css_selector(target), }
else:
types = {"id": lambda d: d.find_element_by_id(target),
"name": lambda d: d.find_element_by_name(target),
"xpath": lambda d: d.find_element_by_xpath(target),
"link_text": lambda d: d.find_element_by_link_text(target),
"partial_link_text": lambda d: d.find_element_by_partial_link_text(target),
"tag": lambda d: d.find_element_by_tag_name(target),
"class": lambda d: d.find_element_by_class_name(target),
"css": lambda d: d.find_element_by_css_selector(target), }
finder = types[type]
try:
result = WebDriverWait(self._wrapped, timeout).until(finder)
if eager and len(result):
return SeleniumContainerWrapper(result, self.timeout, self.silent)
elif _is_wrappable(result):
return SeleniumWrapper(result, self.timeout, self.silent)
else:
return result
except TimeoutException:
if self.silent:
return None
else:
template = ("Waited for element to appear for {sec} seconds, ",
"but {type}:{target} didn't appear.")
msg = "".join(template).format(sec=timeout, type=type, target=target)
raise NoSuchElementException(msg)
    def xpath(self, target, eager=False, timeout=None):
        """Shorthand for waitfor('xpath', ...)."""
        return self.waitfor("xpath", target, eager, timeout)
    def css(self, target, eager=False, timeout=None):
        """Shorthand for waitfor('css', ...)."""
        return self.waitfor("css", target, eager, timeout)
def by_tag(self, tag, eager=False, timeout=None, **attributes):
subjects = ["@{key}='{val}'".format(key=k, val=attributes[k]) for k in attributes]
subject = " and ".join(subjects)
xpath = ".//{tag}[{subject}]".format(tag=tag, subject=subject) if subject else ".//{tag}".format(tag=tag)
return self.waitfor('xpath', xpath, eager, timeout)
def by_text(self, text, tag="*", partial=False, eager=False, timeout=None):
if partial:
return self.xpath(".//{tag}[contains(text(), '{text}')]".format(tag=tag, text=text), eager, timeout)
return self.xpath(".//{tag}[text()='{text}']".format(tag=tag, text=text), eager, timeout)
    def by_class(self, target, eager=False, timeout=None):
        """Shorthand for waitfor('class', ...)."""
        return self.waitfor("class", target, eager, timeout)
    def by_id(self, target, eager=False, timeout=None):
        """Shorthand for waitfor('id', ...)."""
        return self.waitfor("id", target, eager, timeout)
    def by_name(self, target, eager=False, timeout=None):
        """Shorthand for waitfor('name', ...)."""
        return self.waitfor("name", target, eager, timeout)
def by_linktxt(self, target, eager=False, timeout=None, partial=False):
if partial:
return self.waitfor("partial_link_text", target, eager, timeout=None)
else:
return self.waitfor("link_text", target, eager, timeout)
def href(self, partialurl=None, eager=False, timeout=None):
if partialurl:
return self.xpath(".//a[contains(@href, '{0}')]".format(partialurl), eager, timeout)
return self.xpath(".//a", eager, timeout)
def img(self, alt=None, ext=None, eager=False, timeout=None):
options = []
if alt:
options.append("@alt='{0}'".format(alt))
if ext:
options.append("contains(@src, '{0}')".format(ext))
option = " and ".join(options)
xpath = ".//img" + "[{0}]".format(option) if option else ".//img"
return self.xpath(xpath, eager, timeout)
def button(self, value, eager=False, timeout=None):
return self.xpath(
".//input[@type='submit' or @type='button' and @value='{0}']"
"|.//button[text()='{0}']".format(value), eager, timeout)
def checkbox(self, eager=False, timeout=None, **attributes):
attributes["type"] = "checkbox"
return self.by_tag("input", eager, timeout, **attributes)
def radio(self, eager=False, timeout=None, **attributes):
attributes["type"] = "radio"
return self.by_tag("input", | |
<reponame>axonepro/sdk-ooti<filename>resources/ooti.py<gh_stars>1-10
import requests
import json
import sys
# TODO Trouver comment refacto tous ces imports
from .accounting import Accounting
from .actions import Actions
from .annexes import Annexes
from .areas import Areas
from .auth import Auth
from .banks import Banks
from .billing import Billing
from .celery_tasks import Celery_tasks
from .clients import Clients
from .contacts import Contacts
from .contracts import Contracts
from .costs import Costs
from .countries import Countries
from .currencies import Currencies
from .customfields import Customfields
from .defaults import Defaults
from .documents import Documents
from .emails import Emails
from .employees import Employees
from .expenses import Expenses
from .fees import Fees
from .files import Files
from .goals import Goals
from .help import Help
from .helper import Helper
from .imports import Imports
from .inbound_emails import Inbound_emails
from .indicators import Indicators
from .invitations import Invitations
from .invoices import Invoices
from .jobs import Jobs
from .languages import Languages
from .lots import Lots
from .milestones import Milestones
from .newsletters import Newsletters
from .notes import Notes
from .notifications import Notifications
from .organizations import Organizations
from .orgusers import Orgusers
from .payments import Payments
from .permissions import Permissions
from .phases import Phases
from .pipelines import Pipelines
from .plans import Plans
from .posts import Posts
from .prescriptions import Prescriptions
from .profiles import Profiles
from .projections import Projections
from .projects import Projects
from .quickbooks import Quickbooks
from .reports import Reports
from .revenue import Revenue
from .revisions import Revisions
from .roles import Roles
from .stats import Stats
from .styleguides import Styleguides
from .tags import Tags
from .tasks import Tasks
from .teams import Teams
from .timelogs import Timelogs
from .timeperiods import Timeperiods
from .token_auth import Token_auth
from .token_refresh import Token_refresh
from .token_verify import Token_verify
from .trips import Trips
from .zones import Zones
# To read .env variables
import os
from dotenv import load_dotenv
# Loading environment variables (stored in .env file)
load_dotenv()
class OotiAPI(Helper):
def __init__(self, username, password, pagination=None):
self.username = username
self.password = password
self.base_url()
self.org_pk = None
self.teams_pk = None
self.access_token = None
self._csrf_token = None
self.headers = None
self.Accounting = None
self.Actions = None
self.Annexes = None
self.Areas = None
self.Auth = None
self.Banks = None
self.Billing = None
self.Celery_tasks = None
self.Clients = None
self.Contacts = None
self.Contracts = None
self.Costs = None
self.Countries = None
self.Currencies = None
self.Customfields = None
self.Defaults = None
self.Documents = None
self.Emails = None
self.Employees = None
self.Expenses = None
self.Fees = None
self.Files = None
self.Goals = None
self.Help = None
self.Helper = None
self.Imports = None
self.Inbound_emails = None
self.Indicators = None
self.Invitations = None
self.Invoices = None
self.Jobs = None
self.Languages = None
self.Lots = None
self.Milestones = None
self.Newsletters = None
self.Notes = None
self.Notifications = None
self.Organizations = None
self.Orgusers = None
self.Payments = None
self.Permissions = None
self.Phases = None
self.Pipelines = None
self.Plans = None
self.Posts = None
self.Prescriptions = None
self.Profiles = None
self.Projections = None
self.Projects = None
self.Quickbooks = None
self.Reports = None
self.Revenue = None
self.Revisions = None
self.Roles = None
self.Stats = None
self.Styleguides = None
self.Tags = None
self.Tasks = None
self.Teams = None
self.Timelogs = None
self.Timeperiods = None
self.Token_auth = None
self.Token_refresh = None
self.Token_verify = None
self.Trips = None
self.Zones = None
if pagination and isinstance(pagination, int) and pagination > 0:
self.pagination = pagination
else:
self.pagination = 50
def connect(self):
self.__get_csrf_token()
self.__get_token()
self.__get_selected_org()
self.__get_teams()
self.Accounting = Accounting(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Actions = Actions(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Annexes = Annexes(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Areas = Areas(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Auth = Auth(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Banks = Banks(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Billing = Billing(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Celery_tasks = Celery_tasks(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Clients = Clients(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Contacts = Contacts(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Contracts = Contracts(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Costs = Costs(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Countries = Countries(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Currencies = Currencies(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Customfields = Customfields(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Defaults = Defaults(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Documents = Documents(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Emails = Emails(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Employees = Employees(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Expenses = Expenses(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Fees = Fees(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Files = Files(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Goals = Goals(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Help = Help(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Helper = Helper(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Imports = Imports(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Inbound_emails = Inbound_emails(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Indicators = Indicators(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Invitations = Invitations(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Invoices = Invoices(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Jobs = Jobs(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Languages = Languages(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Lots = Lots(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Milestones = Milestones(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Newsletters = Newsletters(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Notes = Notes(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Notifications = Notifications(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Organizations = Organizations(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Orgusers = Orgusers(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Payments = Payments(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Permissions = Permissions(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Phases = Phases(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Pipelines = Pipelines(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Plans = Plans(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Posts = Posts(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Prescriptions = Prescriptions(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Profiles = Profiles(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Projections = Projections(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Projects = Projects(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Quickbooks = Quickbooks(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Reports = Reports(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Revenue = Revenue(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Revisions = Revisions(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Roles = Roles(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Stats = Stats(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Styleguides = Styleguides(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Tags = Tags(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Tasks = Tasks(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Teams = Teams(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Timelogs = Timelogs(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Timeperiods = Timeperiods(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Token_auth = Token_auth(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Token_refresh = Token_refresh(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Token_verify = Token_verify(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Trips = Trips(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
self.Zones = Zones(self.base_url, self.org_pk, self.teams_pk,
self.access_token, self._csrf_token, self.headers, self.pagination)
def base_url(self):
""" Choose base_url based on ENV variable """
ENVIRONMENT = os.getenv("ENVIRONMENT", default=None)
if ENVIRONMENT and ENVIRONMENT == 'STAGING':
self.base_url = 'https://ooti-staging-3.herokuapp.com/api/'
elif ENVIRONMENT and ENVIRONMENT == 'LOCAL':
self.base_url = 'http://127.0.0.1:8000/api/'
else:
self.base_url = 'https://app.ooti.co/api/'
def update_pagination(self, pagination):
""" Setter for pagination """
if pagination and isinstance(pagination, int) and pagination > 0:
self.Accounting.pagination = pagination
self.Actions.pagination = pagination
self.Annexes.pagination = pagination
self.Areas.pagination = pagination
self.Auth.pagination = pagination
self.Banks.pagination = pagination
self.Billing.pagination = pagination
self.Celery_tasks.pagination = pagination
self.Clients.pagination = pagination
self.Contacts.pagination = pagination
self.Contracts.pagination = pagination
self.Costs.pagination = pagination
self.Countries.pagination = pagination
self.Currencies.pagination = pagination
self.Customfields.pagination = pagination
self.Defaults.pagination = pagination
self.Documents.pagination = pagination
self.Emails.pagination = pagination
self.Employees.pagination = pagination
self.Expenses.pagination = pagination
self.Fees.pagination = pagination
self.Files.pagination = pagination
self.Goals.pagination = pagination
self.Help.pagination = pagination
self.Helper.pagination = pagination
self.Imports.pagination = pagination
self.Inbound_emails.pagination = pagination
self.Indicators.pagination = pagination
self.Invitations.pagination = pagination
self.Invoices.pagination = pagination
self.Jobs.pagination = pagination
self.Languages.pagination = pagination
self.Lots.pagination = pagination
self.Milestones.pagination = pagination
self.Newsletters.pagination = pagination
self.Notes.pagination = pagination
self.Notifications.pagination = pagination
self.Organizations.pagination = pagination
self.Orgusers.pagination = pagination
self.Payments.pagination = pagination
self.Permissions.pagination = pagination
self.Phases.pagination = pagination
self.Pipelines.pagination = pagination
self.Plans.pagination = pagination
self.Posts.pagination = pagination
self.Prescriptions.pagination = pagination
self.Profiles.pagination = pagination
self.Projections.pagination = pagination
self.Projects.pagination = pagination
self.Quickbooks.pagination = pagination
self.Reports.pagination = pagination
self.Revenue.pagination = pagination
self.Revisions.pagination = pagination
self.Roles.pagination = | |
to the result
:param should_escape:
If the string should be HTML-escaped
:return:
A unicode string or strlist
"""
if value is None:
return u''
type_ = type(value)
if type_ is not strlist:
if type_ is not str_class:
if type_ is bool:
value = u'true' if value else u'false'
else:
value = str_class(value)
if should_escape:
value = escape(value)
return value
def ensure_scope(context, root):
    """Wrap *context* in a Scope (with itself as parent) unless it already is one."""
    return context if isinstance(context, Scope) else Scope(context, context, root)
def _each(this, options, context):
    """Built-in ``{{#each}}`` helper.

    Renders ``options['fn']`` once per item of *context* (any sized iterable,
    including mappings), passing index/first/last metadata — and the key, for
    mappings — to the per-item Scope. Renders ``options['inverse']`` when the
    context is empty or has no length.
    """
    result = strlist()
    # All sequences in python have a length
    try:
        last_index = len(context) - 1
        # If there are no items, we want to trigger the else clause
        if last_index < 0:
            raise IndexError()
    except (TypeError, IndexError):
        return options['inverse'](this)
    # We use the presence of a keys method to determine if the
    # key attribute should be passed to the block handler
    has_keys = hasattr(context, 'keys')
    index = 0
    for value in context:
        kwargs = {
            'index': index,
            'first': index == 0,
            'last': index == last_index
        }
        if has_keys:
            # Mappings iterate over their keys: expose the key to the block
            # and look up the corresponding value.
            kwargs['key'] = value
            value = context[value]
        scope = Scope(value, this, options['root'], **kwargs)
        # Necessary because of cases such as {{^each things}}test{{/each}}.
        try:
            result.grow(options['fn'](scope))
        except TypeError:
            pass
        index += 1
    return result
def _if(this, options, context):
if hasattr(context, '__call__'):
context = context(this)
if context:
return options['fn'](this)
else:
return options['inverse'](this)
def _log(this, context):
    """Built-in ``log`` helper: forward *context* to the pybars module-level
    log hook. Returns nothing so the template emits no output."""
    pybars.log(context)
def _unless(this, options, context):
if not context:
return options['fn'](this)
def _lookup(this, context, key):
try:
return context[key]
except (KeyError, IndexError, TypeError):
return
def _blockHelperMissing(this, options, context):
    """Fallback block dispatch when no named helper matched.

    Evaluates a callable context, renders the inverse block for falsy
    non-empty-string values, iterates list-like values via ``_each``, and
    otherwise renders the main block against the context (or the current
    scope, when the context is literally True).
    """
    if hasattr(context, '__call__'):
        context = context(this)
    # Falsy values — except the empty string — select the inverse block.
    if not context and context != u"":
        return options['inverse'](this)
    # Deliberate exact type() check: strlist subclasses list and must match.
    if type(context) in (list, strlist, tuple):
        return _each(this, options, context)
    callwith = this if context is True else context
    return options['fn'](callwith)
def _helperMissing(scope, name, *args):
if not args:
return None
raise PybarsError(u"Could not find property %s" % (name,))
def _with(this, options, context):
return options['fn'](context)
# scope for the compiled code to reuse globals
# Maps built-in helper names to the implementations above. Compiled templates
# import this dict (see the header emitted by FunctionContainer.full_code) and
# render() overlays any user-supplied helpers on top of these defaults.
_pybars_ = {
    'helpers': {
        'blockHelperMissing': _blockHelperMissing,
        'each': _each,
        'if': _if,
        'helperMissing': _helperMissing,
        'log': _log,
        'unless': _unless,
        'with': _with,
        'lookup': _lookup,
    },
}
class FunctionContainer:
    """Holds one generated function's name and Python source code.

    Produced by CodeBuilder for the top-level ``render`` function and for
    each nested block function of a compiled template.
    """

    def __init__(self, name, code):
        self.name = name
        self.code = code

    @property
    def full_code(self):
        """The function source prefixed with the version check and imports
        that a standalone precompiled template module needs."""
        preamble = (
            u'import pybars\n'
            u'\n'
            u'if pybars.__version__ != %s:\n'
            u'    raise pybars.PybarsError("This template was precompiled with pybars3 version %s, running version %%s" %% pybars.__version__)\n'
            u'\n'
            u'from pybars import strlist, Scope, PybarsError\n'
            u'from pybars._compiler import _pybars_, escape, resolve, resolve_subexpr, prepare, ensure_scope\n'
            u'\n'
            u'from functools import partial\n'
            u'\n'
            u'\n'
        ) % (repr(pybars.__version__), pybars.__version__)
        return preamble + self.code
class CodeBuilder:
    """Builds the Python source code for a compiled template.

    Functions under construction are kept on a stack of
    ``(source_lines, namespace, function_name)`` tuples so that nested block
    functions ({{#each}}, {{#if}}, partials, ...) can be generated while their
    enclosing function is still open: ``start`` pushes a new function,
    ``finish`` pops it and returns it as a :class:`FunctionContainer`.
    """

    def __init__(self):
        self._reset()

    def _reset(self):
        """Clear all per-template state so the builder can be reused."""
        # Stack entries: (strlist of source lines, dict of nested
        # FunctionContainers/constants, generated function name).
        self.stack = []
        self.var_counter = 1
        self.render_counter = 0

    def start(self):
        """Push a new generated function and emit its prologue."""
        function_name = 'render' if self.render_counter == 0 else 'block_%s' % self.render_counter
        self.render_counter += 1
        self.stack.append((strlist(), {}, function_name))
        self._result, self._locals, _ = self.stack[-1]
        # Context may be a user hash or a Scope (which injects '@_parent' to
        # implement .. lookups). The JS implementation uses a vector of scopes
        # and then interprets a linear walk-up, which is why there is a
        # disabled test showing arbitrary complex path manipulation: the scope
        # approach used here will probably DTRT but may be slower: reevaluate
        # when profiling.
        if len(self.stack) == 1:
            # Top-level entry point: merge user helpers over the built-ins,
            # default the partials dict, and remember whether this is the
            # outermost call (root is None) so finish() can stringify.
            self._result.grow([
                u"def render(context, helpers=None, partials=None, root=None):\n"
                u"    _helpers = dict(_pybars_['helpers'])\n"
                u"    if helpers is not None:\n"
                u"        _helpers.update(helpers)\n"
                u"    helpers = _helpers\n"
                u"    if partials is None:\n"
                u"        partials = {}\n"
                u"    called = root is None\n"
                u"    if called:\n"
                u"        root = context\n"
            ])
        else:
            # Nested block functions receive helpers/partials/root explicitly.
            self._result.grow(u"def %s(context, helpers, partials, root):\n" % function_name)
        self._result.grow(u"    result = strlist()\n")
        self._result.grow(u"    context = ensure_scope(context, root)\n")

    def finish(self):
        """Pop the current function and return it as a FunctionContainer.

        Nested functions and constants accumulated in the namespace are
        emitted ahead of the function body itself.
        """
        lines, ns, function_name = self.stack.pop(-1)
        # Ensure the result is a string and not a strlist
        if len(self.stack) == 0:
            self._result.grow(u"    if called:\n")
            self._result.grow(u"        result = %s(result)\n" % str_class.__name__)
        self._result.grow(u"    return result\n")
        source = str_class(u"".join(lines))
        # Re-point the working references at the enclosing function; when the
        # stack is now empty these become falsy placeholders.
        self._result = self.stack and self.stack[-1][0]
        self._locals = self.stack and self.stack[-1][1]
        code = ''
        for key in ns:
            if isinstance(ns[key], FunctionContainer):
                code += ns[key].code + '\n'
            else:
                code += '%s = %s\n' % (key, repr(ns[key]))
        code += source
        result = FunctionContainer(function_name, code)
        if debug and len(self.stack) == 0:
            print('Compiled Python')
            print('---------------')
            print(result.full_code)
        return result

    def _wrap_nested(self, name):
        """Return source for a partial() that binds helpers/partials/root to
        a nested block function, so it can be called with just a scope."""
        return u"partial(%s, helpers=helpers, partials=partials, root=root)" % name

    def add_block(self, symbol, arguments, nested, alt_nested):
        """Emit code for a {{#symbol}}...{{else}}...{{/symbol}} block."""
        name = nested.name
        self._locals[name] = nested
        if alt_nested:
            alt_name = alt_nested.name
            self._locals[alt_name] = alt_nested
        call = self.arguments_to_call(arguments)
        self._result.grow([
            u"    options = {'fn': %s}\n" % self._wrap_nested(name),
            u"    options['helpers'] = helpers\n"
            u"    options['partials'] = partials\n"
            u"    options['root'] = root\n"
        ])
        if alt_nested:
            self._result.grow([
                u"    options['inverse'] = ",
                self._wrap_nested(alt_name),
                u"\n"
            ])
        else:
            self._result.grow([
                u"    options['inverse'] = lambda this: None\n"
            ])
        # Prefer a registered helper; otherwise resolve the symbol in the
        # current scope and fall back to blockHelperMissing dispatch.
        self._result.grow([
            u"    value = helper = helpers.get('%s')\n" % symbol,
            u"    if value is None:\n"
            u"        value = resolve(context, '%s')\n" % symbol,
            u"    if helper and hasattr(helper, '__call__'):\n"
            u"        value = helper(context, options%s\n" % call,
            u"    else:\n"
            u"        value = helpers['blockHelperMissing'](context, options, value)\n"
            u"    result.grow(value or '')\n"
        ])

    def add_literal(self, value):
        """Emit code appending a literal template chunk to the output."""
        self._result.grow(u"    result.append(%s)\n" % repr(value))

    def _lookup_arg(self, arg):
        """Map an empty argument to the current context expression."""
        if not arg:
            return u"context"
        return arg

    def arguments_to_call(self, arguments):
        """Render *arguments* as the tail of a call expression: ', a, b)'."""
        params = list(map(self._lookup_arg, arguments))
        output = u', '.join(params) + u')'
        if len(params) > 0:
            output = u', ' + output
        return output

    def find_lookup(self, path, path_type, call):
        """Emit code resolving *path* to a value (helper or scope lookup)."""
        if path_type == "simple":  # simple names can reference helpers.
            # TODO: compile this whole expression in the grammar; for now,
            # fugly but only a compile time overhead.
            # XXX: just rm.
            realname = path.replace('.get("', '').replace('")', '')
            self._result.grow([
                u"    value = helpers.get('%s')\n" % realname,
                u"    if value is None:\n"
                u"        value = resolve(context, '%s')\n" % path,
            ])
        else:
            realname = None
            self._result.grow(u"    value = %s\n" % path)
        self._result.grow([
            u"    if hasattr(value, '__call__'):\n"
            u"        value = value(context%s\n" % call,
        ])
        if realname:
            self._result.grow(
                u"    elif value is None:\n"
                u"        value = helpers['helperMissing'](context, '%s'%s\n"
                % (realname, call)
            )

    def add_escaped_expand(self, path_type_path, arguments):
        """Emit code for an HTML-escaped expansion: {{path}}."""
        (path_type, path) = path_type_path
        call = self.arguments_to_call(arguments)
        self.find_lookup(path, path_type, call)
        self._result.grow([
            u"    result.grow(prepare(value, True))\n"
        ])

    def add_expand(self, path_type_path, arguments):
        """Emit code for a raw (unescaped) expansion: {{{path}}}."""
        (path_type, path) = path_type_path
        call = self.arguments_to_call(arguments)
        self.find_lookup(path, path_type, call)
        self._result.grow([
            u"    result.grow(prepare(value, False))\n"
        ])

    def _debug(self):
        """Emit a pdb breakpoint into the generated code (debugging aid)."""
        self._result.grow(u"    import pdb;pdb.set_trace()\n")

    def add_invertedblock(self, symbol, arguments, nested, alt_nested):
        """Emit code for an inverted block: {{^symbol}}...{{/symbol}}."""
        # This may need to be a blockHelperMissing call as well.
        name = nested.name
        self._locals[name] = nested
        if alt_nested:
            alt_name = alt_nested.name
            self._locals[alt_name] = alt_nested
        call = self.arguments_to_call(arguments)
        # Mirror image of add_block: the main body becomes the inverse.
        self._result.grow([
            u"    options = {'inverse': %s}\n" % self._wrap_nested(name),
            u"    options['helpers'] = helpers\n"
            u"    options['partials'] = partials\n"
            u"    options['root'] = root\n"
        ])
        if alt_nested:
            self._result.grow([
                u"    options['fn'] = ",
                self._wrap_nested(alt_name),
                u"\n"
            ])
        else:
            self._result.grow([
                u"    options['fn'] = lambda this: None\n"
            ])
        self._result.grow([
            u"    value = helper = helpers.get('%s')\n" % symbol,
            u"    if value is None:\n"
            u"        value = resolve(context, '%s')\n" % symbol,
            u"    if helper and hasattr(helper, '__call__'):\n"
            u"        value = helper(context, options%s\n" % call,
            u"    else:\n"
            u"        value = helpers['blockHelperMissing'](context, options, value)\n"
            u"    result.grow(value or '')\n"
        ])

    def _invoke_template(self, fn_name, this_name):
        """Emit a call to a generated function, growing its output."""
        self._result.grow([
            u"    result.grow(",
            fn_name,
            u"(",
            this_name,
            u", helpers=helpers, partials=partials, root=root))\n"
        ])

    def add_partial(self, symbol, arguments):
        """Emit code invoking a partial, with optional hash overrides and at
        most one positional context argument."""
        arg = ""
        overrides = None
        positional_args = 0
        if arguments:
            for argument in arguments:
                # Raw string: '\w' is an invalid escape in a plain literal.
                kwmatch = re.match(r'(\w+)=(.+)$', argument)
                if kwmatch:
                    if not overrides:
                        overrides = {}
                    overrides[kwmatch.group(1)] = kwmatch.group(2)
                else:
                    if positional_args != 0:
                        raise PybarsError("An extra positional argument was passed to a partial")
                    positional_args += 1
                    arg = argument
        overrides_literal = 'None'
        if overrides:
            overrides_literal = u'{'
            for key in overrides:
                overrides_literal += u'"%s": %s, ' % (key, overrides[key])
            overrides_literal += u'}'
        self._result.grow([u"    overrides = %s\n" % overrides_literal])
        self._result.grow([
            u"    partialName = %s\n" % symbol,
            u"    if partialName not in partials:\n",
            u"        raise PybarsError('The partial %s could not be found' % partialName)\n",
            u"    inner = partials[partialName]\n",
            u"    scope = Scope(%s, context, root, overrides=overrides)\n" % self._lookup_arg(arg)])
        self._invoke_template("inner", "scope")
class Compiler:
"""A handlebars template compiler.
The compiler is not threadsafe: you need one per thread because of the
state in CodeBuilder.
"""
_handlebars = OMeta.makeGrammar(handlebars_grammar, {}, 'handlebars')
_builder = CodeBuilder()
_compiler = OMeta.makeGrammar(compile_grammar, {'builder': _builder})
def __init__(self):
self._helpers = {}
self.template_counter = 1
def _extract_word(self, | |
<reponame>ChateauClaudia-Labs/apodeixi<filename>src/apodeixi/controllers/journeys/delivery_planning/journeys_controller.py<gh_stars>0
import itertools as _itertools
from apodeixi.controllers.util.manifest_api import ManifestAPI
from apodeixi.controllers.journeys.delivery_planning.journeys_posting_label import JourneysPostingLabel
from apodeixi.controllers.util.skeleton_controller import SkeletonController
from apodeixi.controllers.admin.static_data.static_data_validator import StaticDataValidator
from apodeixi.knowledge_base.filing_coordinates import JourneysFilingCoordinates
from apodeixi.knowledge_base.knowledge_base_util import FormRequest
from apodeixi.util.a6i_error import ApodeixiError
from apodeixi.util.formatting_utils import StringUtils
from apodeixi.util.dictionary_utils import DictionaryUtils
from apodeixi.util.dataframe_utils import DataFrameUtils
class JourneysController(SkeletonController):
'''
Abstrac class to with common properties for posting controllers in the Journey domain.
@param store A KnowledgeBaseStore instance. Handles all I/O of postings and manifests for this controller.
@param a6i_config The ApodeixiConfig instance for the Python process in which we are running.
'''
    def __init__(self, parent_trace, store, a6i_config):
        super().__init__(parent_trace, store, a6i_config)
        # Every Journeys controller posts against the same manifest API:
        # the 'delivery-planning' subdomain of the 'journeys' domain,
        # published by 'a6i' and serialized with the 'io' extension.
        self.MANIFEST_API = ManifestAPI( parent_trace = parent_trace,
                            subdomain = 'delivery-planning',
                            domain = 'journeys',
                            api_publisher = 'a6i',
                            extension = 'io')
        self.using_subproducts = None # This will be set in self._buildOneManifest
        self.subproducts = None # This will be set in self._buildOneManifest
    def getFilingClass(self):
        '''
        Returns a class object, corresponding to the concrete subclass of FilingCoordinates
        that is supported by this controller (used e.g. for validation in
        manifestNameFromCoords and manifestLabelsFromCoords).
        '''
        return JourneysFilingCoordinates
    def getManifestAPI(self):
        '''
        Returns the ManifestAPI instance (configured in __init__) that this controller posts against.
        '''
        return self.MANIFEST_API
def subnamespaceFromLabel(self, parent_trace, label):
'''
Helper method that returns what the 'subnamespace' that is a portion of a manifest's name.
It is inferred from a `label` that provides the posting details for a manifest that should be created.
Returns a string corresponding to the subnamespace, if one applies to this `kind` of manifest.
If no subnamespace applies, returns None.
'''
journey = label.journey (parent_trace)
FMT = StringUtils().format_as_yaml_fieldname # Abbreviation for readability
return FMT(journey)
def manifestNameFromLabel(self, parent_trace, label, kind):
'''
Helper method that returns what the 'name' field should be in the manifest to be created with the given
label
@param kind The kind of manifest for which the name is sought. This parameter can be ignored for controller
classes that use the same name for all supported kinds; it is meant to support controllers that
process multiple manifest kinds and do not use the same name for all of them. For example, controllers
that point to reference data in a different domain/sub-domain.
'''
product = label.product (parent_trace)
journey = label.journey (parent_trace)
scenario = label.scenario (parent_trace)
scoring_cycle = label.scoring_cycle (parent_trace)
FMT = StringUtils().format_as_yaml_fieldname # Abbreviation for readability
name = FMT(journey + '.' + scoring_cycle + '.' + product + '.' + scenario)
return name
def manifestNameFromCoords(self, parent_trace, subnamespace, coords, kind):
'''
Helper method that returns what the 'name' field should be in the manifest to be created with the given
filing coords, possibly complemented by the subnamespace.
Usually used in the context of generating forms.
Example: consider a manifest name like "modernization.dec-2020.fusionopus.default"
in namespace "my-corp.production".
To build such a name, this method must receive "modernization" as the subnamespace, and
filing coords from which to infer "dec-20220", "fusionopus", and "default".
@param subnamespace A string, which is allowed to be None. If not null, this is a further partioning of
the namespace into finer slices, and a manifest's name is supposed to identify the slice
in which the manifest resides.
@param coords A FilingCoords object corresponding to this controller. It is used, possibly along with the
`subnamespace` parameter, to build a manifest name.
@param kind The kind of manifest for which the name is sought. This parameter can be ignored for controller
classes that use the same name for all supported kinds; it is meant to support controllers that
process multiple manifest kinds and do not use the same name for all of them. For example, controllers
that point to reference data in a different domain/sub-domain.
'''
if not type(coords) == self.getFilingClass():
raise ApodeixiError(parent_trace, "Can't build manifest name because received wrong type of filing coordinates",
data = {"Type of coords received": str(type(coords)),
"Expected type of coords": str(self.getFilingClass())})
if subnamespace == None:
raise ApodeixiError(parent_trace, "Can't build manifest name becase subnamespace is null. Should be "
+ "set to a kind of journey. Example: 'modernization'")
product = coords.product
journey = subnamespace
scenario = coords.scenario
scoring_cycle = coords.scoringCycle
FMT = StringUtils().format_as_yaml_fieldname # Abbreviation for readability
name = FMT(journey + '.' + scoring_cycle + '.' + product + '.' + scenario)
return name
def manifestLabelsFromCoords(self, parent_trace, subnamespace, coords):
'''
Helper method that returns what the a dict whose keys are label field names that should be populated
inside a manifest based on the parameters, and the values are what the value should be for each label.
Usually used in the context of generating forms.
Example: consider a manifest name like "modernization.dec-2020.fusionopus.default"
in namespace "my-corp.production", that arose from a posting for product "Fusion Opus",
scoring cycle "Dec 2020" and scenario "Default".
Then this method returns ["modernization", "Dec 2020", "Fusion Opus", and "Default"].
@param subnamespace A string, which is allowed to be None. If not null, this is a further partioning of
the namespace into finer slices, and a manifest's name is supposed to identify the slice
in which the manifest resides.
@param coords A FilingCoords object corresponding to this controller. It is used, possibly along with the
`subnamespace` parameter, to build a manifest name.
'''
if not type(coords) == self.getFilingClass():
raise ApodeixiError(parent_trace, "Can't build manifest name because received wrong type of filing coordinates",
data = {"Type of coords received": str(type(coords)),
"Expected type of coords": str(self.getFilingClass())})
if subnamespace == None:
raise ApodeixiError(parent_trace, "Can't build manifest name becase subnamespace is null. Should be "
+ "set to a kind of journey. Example: 'modernization'")
product = coords.product
journey = subnamespace
scenario = coords.scenario
scoring_cycle = coords.scoringCycle
MY_PL = JourneysPostingLabel # Abbreviation for readability
result_dict = {}
result_dict[MY_PL._PRODUCT] = product
result_dict[MY_PL._JOURNEY] = journey
result_dict[MY_PL._SCENARIO] = scenario
result_dict[MY_PL._SCORING_CYCLE] = scoring_cycle
result_dict[MY_PL._SCORING_MATURITY] = ""
return result_dict
    def _buildOneManifest(self, parent_trace, posting_data_handle, label):
        '''
        Helper function, amenable to unit testing, unlike the enveloping controller `apply` function that require a knowledge base
        structure
        '''
        # Determine if we have subproducts, because in that case we need to remember that since it may affect how some
        # derived classes create their posting configuration. For example, for kind `big-rock-estimate` the posting configuration
        # needs to record that there will be 2 headers (a MultiLevel index in Pandas), not 1
        #
        # GOTCHA: This determination must be done *before* calling super()._buildOneManifest, since super will lead to the
        #           actual loading of the Excel, which requires having the posting configuration aware of how many headers are expected
        #           in Excel (i.e., whether Pandas shuold build a MultiLevel index for the columns, or not)
        #           It is for that reason that his code duplicates some things that the call to super() will redo, like determining
        #           the namespace
        my_trace                        = parent_trace.doing("Determining if subproducts exist")
        # The 'if True:' blocks below appear to exist purely to visually group the work done under each trace.
        if True:
            organization                = label.organization        (parent_trace)
            kb_area                     = label.knowledgeBaseArea   (parent_trace)
            FMT                         = StringUtils().format_as_yaml_fieldname # Abbreviation for readability
            namespace                   = FMT(organization + '.' + kb_area)
            product                     = label.product             (my_trace)
            # NOTE(review): validator calls below use parent_trace rather than my_trace — confirm intended.
            validator                   = StaticDataValidator(parent_trace, self.store, self.a6i_config)
            subproducts                 = validator.getSubProducts(parent_trace, namespace, product)
            if len(subproducts) == 0:
                self.using_subproducts  = False
            else:
                self.using_subproducts  = True # Derived classes can use this in self.getPostingConfig to ensure multiple headers are on
                self.subproducts        = subproducts
        manifest_dict                   = super()._buildOneManifest(parent_trace, posting_data_handle, label)
        my_trace                        = parent_trace.doing("Getting PostingLabel fields specific to BigRocksEstimate_Controller")
        product                         = label.product             (my_trace)
        journey                         = label.journey             (my_trace)
        scenario                        = label.scenario            (my_trace)
        scoring_cycle                   = label.scoring_cycle       (my_trace)
        scoring_maturity                = label.scoring_maturity    (my_trace)
        my_trace                        = parent_trace.doing("Enriching generic manifest fields with additional fields "
                                                            + "specific to BigRocksEstimate_Controller")
        if True:
            # Stamp the journey-specific posting label fields into both the
            # manifest's metadata labels and its assertion section.
            metadata                                    = manifest_dict['metadata']
            MY_PL                                       = JourneysPostingLabel # Abbreviation for readability
            labels                                      = metadata['labels']
            labels[MY_PL._PRODUCT]                      = product
            labels[MY_PL._JOURNEY]                      = journey
            labels[MY_PL._SCENARIO]                     = scenario
            labels[MY_PL._SCORING_CYCLE]                = scoring_cycle
            labels[MY_PL._SCORING_MATURITY]             = scoring_maturity
            assertion                                   = manifest_dict['assertion']
            assertion[MY_PL._SCENARIO]                  = scenario
            assertion[MY_PL._SCORING_CYCLE]             = scoring_cycle
            assertion[MY_PL._SCORING_MATURITY]          = scoring_maturity
        return manifest_dict
def createTemplate(self, parent_trace, form_request, kind):
'''
Returns a "template" for a manifest, i.e., a dict that has the basic fields (with empty or mocked-up
content) to support a ManifestRepresenter to create an Excel spreadsheet with that information.
It is intended to support the processing of blind form requests.
For reasons of convenience (to avoid going | |
<reponame>ronaldseoh/transformers-nli<gh_stars>1-10
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" NLI processors and helpers """
import logging
import os
import pandas as pd
import copy
import tqdm
import json
logger = logging.getLogger(__name__)
class NLIInputExample(object):
    """
    A single premise/hypothesis pair for NLI sequence classification.

    Args:
        guid: Unique id for the example.
        premise: string. The untokenized premise text.
        hypothesis: string. The untokenized hypothesis text.
        label: (Optional) string. The gold label; set for train and dev
            examples, but not for test examples.
    """

    def __init__(self, guid, premise, hypothesis, label):
        self.guid = guid
        self.premise = premise
        self.hypothesis = hypothesis
        self.label = label

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class QA2NLIInputExample(object):
    """
    A single multiple-choice QA example recast as NLI.

    Args:
        example_id: Unique id for the example.
        premise: string. The untokenized premise text.
        options: The candidate answers/hypotheses for this premise.
        label: (Optional) The label of the example. This should be specified
            for train and dev examples, but not for test examples.
    """

    def __init__(self, example_id, premise, options, label):
        self.example_id = example_id
        self.premise = premise
        self.options = options
        self.label = label

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class NLIInputFeatures(object):
    """
    Tokenized features for one NLI example.

    Args:
        input_ids: Indices of input sequence tokens in the vocabulary.
        attention_mask: Mask to avoid performing attention on padding token
            indices. Values in ``[0, 1]``: usually ``1`` for tokens that are
            NOT MASKED, ``0`` for MASKED (padded) tokens.
        token_type_ids: Segment token indices indicating first and second
            portions of the inputs.
        label: Label corresponding to the input.
    """

    def __init__(self,
                 input_ids,
                 attention_mask=None,
                 token_type_ids=None,
                 label=None):
        self.input_ids = input_ids
        self.attention_mask = attention_mask
        self.token_type_ids = token_type_ids
        self.label = label

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class QA2NLIInputFeatures(object):
    """Features for one multiple-choice QA-as-NLI example: one feature dict
    (input_ids / input_mask / segment_ids) per candidate choice."""

    def __init__(self, example_id, choices_features, label):
        self.example_id = example_id
        self.choices_features = []
        for input_ids, input_mask, segment_ids in choices_features:
            self.choices_features.append({
                "input_ids": input_ids,
                "input_mask": input_mask,
                "segment_ids": segment_ids,
            })
        self.label = label
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """Gets an example from a dict with tensorflow tensors

        Args:
            tensor_dict: Keys and values should match the corresponding Glue
                tensorflow_dataset examples.
        """
        raise NotImplementedError()

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    def tfds_map(self, example):
        """Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are.
        This method converts examples to the correct format."""
        if len(self.get_labels()) > 1:
            example.label = self.get_labels()[int(example.label)]
        return example

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file.

        Args:
            input_file: Path of the TSV file to read.
            quotechar: Optional quote character forwarded to ``csv.reader``.

        Returns:
            A list of rows, each row a list of column strings.
        """
        # Bug fix: ``csv`` is never imported at module level in this file,
        # so calling this method raised NameError. Import locally to keep
        # the fix self-contained.
        import csv
        with open(input_file, "r", encoding="utf-8-sig") as f:
            return list(csv.reader(f, delimiter="\t", quotechar=quotechar))
def nli_convert_examples_to_features(
    examples,
    tokenizer,
    max_length=512,
    task=None,
    label_list=None,
    output_mode=None,
    pad_on_left=False,
    pad_token=0,
    pad_token_segment_id=0,
    mask_padding_with_zero=True,
    no_passage=False,
):
    """
    Loads a data file into a list of ``NLIInputFeatures``

    Args:
        examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.
        tokenizer: Instance of a tokenizer that will tokenize the examples
        max_length: Maximum example length
        task: NLI task
        label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method
        output_mode: String indicating the output mode. Either ``regression`` or ``classification``
        pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
        pad_token: Padding token
        pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)
        mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
            and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
            actual values)
        no_passage: If ``True``, encode only the hypothesis (premise is dropped).

    Note:
        ``pad_on_left``, ``pad_token``, ``pad_token_segment_id`` and
        ``mask_padding_with_zero`` are accepted for API compatibility but are
        not referenced in this implementation; padding is delegated entirely
        to ``tokenizer.encode_plus(pad_to_max_length=True)``.

    Returns:
        If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``
        containing the task-specific features. If the input is a list of ``InputExamples``, will return
        a list of task-specific ``NLIInputFeatures`` which can be fed to the model.
    """
    # Derive label list / output mode from the registered task processor when
    # not given explicitly.
    if task is not None:
        processor = nli_processors[task]()
        if label_list is None:
            label_list = processor.get_labels()
            logger.info("Using label list %s for task %s" % (label_list, task))
        if output_mode is None:
            output_mode = nli_output_modes[task]
            logger.info(
                "Using output mode %s for task %s" % (output_mode, task))
    # NOTE(review): if both `task` and `label_list` are None, the next line
    # raises TypeError (enumerate(None)) — confirm callers always supply one.
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for (ex_index, example) in enumerate(examples):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d/%d" % (ex_index, len(examples)))
        # Encode hypothesis alone, or premise+hypothesis as a pair.
        inputs = tokenizer.encode_plus(
            example.hypothesis,
            add_special_tokens=True,
            return_token_type_ids=True,
            pad_to_max_length=True,
            return_attention_mask=True,
            max_length=max_length) if no_passage else tokenizer.encode_plus(
            example.premise,
            example.hypothesis,
            pad_to_max_length=True,
            add_special_tokens=True,
            return_token_type_ids=True,
            return_attention_mask=True,
            max_length=max_length)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        if output_mode == "classification":
            label = label_map[example.label]
        elif output_mode == "regression":
            label = float(example.label)
        else:
            raise KeyError(output_mode)
        # Log the first few examples for eyeball verification.
        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            logger.info("input_ids: %s" % " ".join(
                [str(x) for x in inputs['input_ids']]))
            logger.info("attention_mask: %s" % " ".join(
                [str(x) for x in inputs['attention_mask']]))
            logger.info("token_type_ids: %s" % " ".join(
                [str(x) for x in inputs['token_type_ids']]))
            logger.info("label: %s (id = %d)" % (example.label, label))
        features.append(NLIInputFeatures(**inputs, label=label))
    return features
def qa2nli_convert_examples_to_features(
examples,
tokenizer,
max_length=512,
task=None,
label_list=None,
output_mode=None,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
mask_padding_with_zero=True,
no_passage=False,
):
"""
Loads a data file into a list of ``QA2NLIInputFeatures``
Args:
examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.
tokenizer: Instance of a tokenizer that will tokenize the examples
max_length: Maximum example length
task: NLI task
label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method
output_mode: String indicating the output mode. Either ``regression`` or ``classification``
pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
pad_token: Padding token
pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)
mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
actual values)
Returns:
If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``
containing the task-specific features. If the input is a list of ``InputExamples``, will return
a list of task-specific ``QA2NLIInputFeatures`` which can be fed to the model.
"""
if task is not None:
processor = nli_processors[task]()
if label_list is None:
label_list = processor.get_labels()
logger.info("Using label list %s for task %s" % (label_list, task))
if output_mode is None:
output_mode = nli_output_modes[task]
logger.info(
"Using output mode %s for task %s" % (output_mode, task))
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in tqdm.tqdm(
| |
<gh_stars>0
# Copyright 2013-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file contains the detection logic for miscellaneous external dependencies.
import glob
import os
import stat
import sysconfig
from .. import mlog
from .. import mesonlib
from ..environment import detect_cpu_family
from .base import Dependency, DependencyException, DependencyMethods, ExtraFrameworkDependency, PkgConfigDependency
class BoostDependency(Dependency):
    """Detect the Boost C++ libraries.

    Detection order for the root: the BOOST_ROOT environment variable,
    then a platform default (``c:\\local\\boost_*`` glob on Windows,
    ``/usr/include`` headers on Unix).  BOOST_INCLUDEDIR / BOOST_LIBRARYDIR
    can override the include and library directories individually.
    The version is parsed out of ``boost/version.hpp``.
    """
    # Some boost libraries have different names for
    # their sources and libraries. This dict maps
    # between the two.
    name2lib = {'test': 'unit_test_framework'}

    def __init__(self, environment, kwargs):
        Dependency.__init__(self, 'boost', kwargs)
        self.name = 'boost'
        self.environment = environment
        self.libdir = ''
        self.static = kwargs.get('static', False)
        # 'native: true' on a cross build means the build-machine Boost is wanted.
        if 'native' in kwargs and environment.is_cross_build():
            self.want_cross = not kwargs['native']
        else:
            self.want_cross = environment.is_cross_build()
        try:
            self.boost_root = os.environ['BOOST_ROOT']
            if not os.path.isabs(self.boost_root):
                raise DependencyException('BOOST_ROOT must be an absolute path.')
        except KeyError:
            self.boost_root = None
        if self.boost_root is None:
            if self.want_cross:
                # Cannot guess host-machine Boost locations; require explicit dirs.
                if 'BOOST_INCLUDEDIR' in os.environ:
                    self.incdir = os.environ['BOOST_INCLUDEDIR']
                else:
                    raise DependencyException('BOOST_ROOT or BOOST_INCLUDEDIR is needed while cross-compiling')
            if mesonlib.is_windows():
                self.boost_root = self.detect_win_root()
                self.incdir = self.boost_root
            else:
                if 'BOOST_INCLUDEDIR' in os.environ:
                    self.incdir = os.environ['BOOST_INCLUDEDIR']
                else:
                    self.incdir = '/usr/include'
        else:
            self.incdir = os.path.join(self.boost_root, 'include')
        self.boost_inc_subdir = os.path.join(self.incdir, 'boost')
        mlog.debug('Boost library root dir is', self.boost_root)
        # Filled in by the detect_* methods below; keys are module names.
        self.src_modules = {}
        self.lib_modules = {}
        self.lib_modules_mt = {}  # multithreaded ("-mt") variants
        self.detect_version()
        self.requested_modules = self.get_requested(kwargs)
        module_str = ', '.join(self.requested_modules)
        # self.version is None when boost/version.hpp was not found.
        if self.version is not None:
            self.detect_src_modules()
            self.detect_lib_modules()
            self.validate_requested()
            if self.boost_root is not None:
                info = self.version + ', ' + self.boost_root
            else:
                info = self.version
            mlog.log('Dependency Boost (%s) found:' % module_str, mlog.green('YES'), info)
        else:
            mlog.log("Dependency Boost (%s) found:" % module_str, mlog.red('NO'))
        if 'cpp' not in self.environment.coredata.compilers:
            raise DependencyException('Tried to use Boost but a C++ compiler is not defined.')
        self.cpp_compiler = self.environment.coredata.compilers['cpp']

    def detect_win_root(self):
        # Default install location used by the boost.org Windows binaries.
        globtext = 'c:\\local\\boost_*'
        files = glob.glob(globtext)
        if len(files) > 0:
            return files[0]
        return 'C:\\'

    def get_compile_args(self):
        """Return the include arguments needed to compile against Boost."""
        args = []
        if self.boost_root is not None:
            if mesonlib.is_windows():
                include_dir = self.boost_root
            else:
                include_dir = os.path.join(self.boost_root, 'include')
        else:
            include_dir = self.incdir
        # Use "-isystem" when including boost headers instead of "-I"
        # to avoid compiler warnings/failures when "-Werror" is used
        # Careful not to use "-isystem" on default include dirs as it
        # breaks some of the headers for certain gcc versions
        # For example, doing g++ -isystem /usr/include on a simple
        # "int main()" source results in the error:
        # "/usr/include/c++/6.3.1/cstdlib:75:25: fatal error: stdlib.h: No such file or directory"
        # See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70129
        # and http://stackoverflow.com/questions/37218953/isystem-on-a-system-include-directory-causes-errors
        # for more details
        # TODO: The correct solution would probably be to ask the
        # compiler for it's default include paths (ie: "gcc -xc++ -E
        # -v -") and avoid including those with -isystem
        # For now, use -isystem for all includes except for some
        # typical defaults (which don't need to be included at all
        # since they are in the default include paths)
        if include_dir != '/usr/include' and include_dir != '/usr/local/include':
            args.append("".join(self.cpp_compiler.get_include_args(include_dir, True)))
        return args

    def get_requested(self, kwargs):
        """Normalize the 'modules' kwarg to a validated list of strings."""
        candidates = kwargs.get('modules', [])
        if isinstance(candidates, str):
            return [candidates]
        for c in candidates:
            if not isinstance(c, str):
                raise DependencyException('Boost module argument is not a string.')
        return candidates

    def validate_requested(self):
        # Every requested module must at least exist as a header directory.
        for m in self.requested_modules:
            if m not in self.src_modules:
                raise DependencyException('Requested Boost module "%s" not found.' % m)

    def found(self):
        return self.version is not None

    def get_version(self):
        return self.version

    def detect_version(self):
        """Parse BOOST_LIB_VERSION out of boost/version.hpp; None if absent."""
        try:
            ifile = open(os.path.join(self.boost_inc_subdir, 'version.hpp'))
        except FileNotFoundError:
            self.version = None
            return
        with ifile:
            for line in ifile:
                if line.startswith("#define") and 'BOOST_LIB_VERSION' in line:
                    ver = line.split()[-1]
                    # Strip the surrounding quotes, e.g. "1_64" -> 1.64.
                    ver = ver[1:-1]
                    self.version = ver.replace('_', '.')
                    return
        self.version = None

    def detect_src_modules(self):
        # Every subdirectory of <incdir>/boost is a header-only module name.
        for entry in os.listdir(self.boost_inc_subdir):
            entry = os.path.join(self.boost_inc_subdir, entry)
            if stat.S_ISDIR(os.stat(entry).st_mode):
                self.src_modules[os.path.split(entry)[-1]] = True

    def detect_lib_modules(self):
        if mesonlib.is_windows():
            return self.detect_lib_modules_win()
        return self.detect_lib_modules_nix()

    def detect_lib_modules_win(self):
        arch = detect_cpu_family(self.environment.coredata.compilers)
        # Guess the libdir
        if arch == 'x86':
            gl = 'lib32*'
        elif arch == 'x86_64':
            gl = 'lib64*'
        else:
            # Does anyone do Boost cross-compiling to other archs on Windows?
            gl = None
        # See if the libdir is valid
        if gl:
            libdir = glob.glob(os.path.join(self.boost_root, gl))
        else:
            libdir = []
        # Can't find libdir, bail
        if not libdir:
            return
        libdir = libdir[0]
        self.libdir = libdir
        # NOTE(review): only debug ("-gd-") variants are globbed here.
        globber = 'libboost_*-gd-*.lib' if self.static else 'boost_*-gd-*.lib' # FIXME
        for entry in glob.glob(os.path.join(libdir, globber)):
            (_, fname) = os.path.split(entry)
            base = fname.split('_', 1)[1]
            modname = base.split('-', 1)[0]
            self.lib_modules_mt[modname] = fname

    def detect_lib_modules_nix(self):
        if self.static:
            libsuffix = 'a'
        elif mesonlib.is_osx() and not self.want_cross:
            libsuffix = 'dylib'
        else:
            libsuffix = 'so'
        globber = 'libboost_*.{}'.format(libsuffix)
        if 'BOOST_LIBRARYDIR' in os.environ:
            libdirs = [os.environ['BOOST_LIBRARYDIR']]
        elif self.boost_root is None:
            libdirs = mesonlib.get_library_dirs()
        else:
            libdirs = [os.path.join(self.boost_root, 'lib')]
        for libdir in libdirs:
            for entry in glob.glob(os.path.join(libdir, globber)):
                lib = os.path.basename(entry)
                # e.g. libboost_thread-mt.so -> "thread-mt".
                name = lib.split('.')[0].split('_', 1)[-1]
                # I'm not 100% sure what to do here. Some distros
                # have modules such as thread only as -mt versions.
                if entry.endswith('-mt.{}'.format(libsuffix)):
                    self.lib_modules_mt[name] = True
                else:
                    self.lib_modules[name] = True

    def get_win_link_args(self):
        args = []
        if self.boost_root:
            args.append('-L' + self.libdir)
        for module in self.requested_modules:
            module = BoostDependency.name2lib.get(module, module)
            if module in self.lib_modules_mt:
                args.append(self.lib_modules_mt[module])
        return args

    def get_link_args(self):
        """Return linker arguments for all requested modules."""
        if mesonlib.is_windows():
            return self.get_win_link_args()
        args = []
        if self.boost_root:
            args.append('-L' + os.path.join(self.boost_root, 'lib'))
        elif 'BOOST_LIBRARYDIR' in os.environ:
            args.append('-L' + os.environ['BOOST_LIBRARYDIR'])
        for module in self.requested_modules:
            module = BoostDependency.name2lib.get(module, module)
            libname = 'boost_' + module
            # The compiler's library detector is the most reliable so use that first.
            default_detect = self.cpp_compiler.find_library(libname, self.environment, [])
            if default_detect is not None:
                # NOTE(review): name2lib maps 'test' to 'unit_test_framework',
                # so this 'unit_testing_framework' comparison (and the two
                # below) can never match — likely a typo to confirm upstream.
                if module == 'unit_testing_framework':
                    emon_args = self.cpp_compiler.find_library('boost_test_exec_monitor')
                else:
                    emon_args = None
                args += default_detect
                if emon_args is not None:
                    args += emon_args
            elif module in self.lib_modules or module in self.lib_modules_mt:
                linkcmd = '-l' + libname
                args.append(linkcmd)
                # FIXME a hack, but Boost's testing framework has a lot of
                # different options and it's hard to determine what to do
                # without feedback from actual users. Update this
                # as we get more bug reports.
                if module == 'unit_testing_framework':
                    args.append('-lboost_test_exec_monitor')
            elif module + '-mt' in self.lib_modules_mt:
                # Fall back to the multithreaded variant of the library.
                linkcmd = '-lboost_' + module + '-mt'
                args.append(linkcmd)
                if module == 'unit_testing_framework':
                    args.append('-lboost_test_exec_monitor-mt')
        return args

    def get_sources(self):
        return []

    def need_threads(self):
        return 'thread' in self.requested_modules
class ThreadDependency(Dependency):
    """Trivial dependency for the platform's threading support.

    Threads are provided by the toolchain itself, so detection always
    succeeds and no version information is available.
    """

    def __init__(self, environment, kwargs):
        super().__init__('threads', {})
        self.name = 'threads'
        # Nothing to probe for: the compiler supplies thread support.
        self.is_found = True
        mlog.log('Dependency', mlog.bold(self.name), 'found:',
                 mlog.green('YES'))

    def need_threads(self):
        return True

    def get_version(self):
        # No meaningful version can be attached to thread support.
        return 'unknown'
class Python3Dependency(Dependency):
    def __init__(self, environment, kwargs):
        """Detect Python 3 via pkg-config, then platform-specific fallbacks.

        Order: pkg-config 'python3' (if enabled), then sysconfig on
        Windows, then the unversioned 'python' framework on OSX.
        """
        super().__init__('python3', kwargs)
        self.name = 'python3'
        self.is_found = False
        # We can only be sure that it is Python 3 at this point
        self.version = '3'
        if DependencyMethods.PKGCONFIG in self.methods:
            try:
                pkgdep = PkgConfigDependency('python3', environment, kwargs)
                if pkgdep.found():
                    self.cargs = pkgdep.cargs
                    self.libs = pkgdep.libs
                    self.version = pkgdep.get_version()
                    self.is_found = True
                    return
            except Exception:
                # pkg-config missing or broken; fall through to the
                # platform-specific detection below.
                pass
        if not self.is_found:
            if mesonlib.is_windows() and DependencyMethods.SYSCONFIG in self.methods:
                self._find_libpy3_windows(environment)
            elif mesonlib.is_osx() and DependencyMethods.EXTRAFRAMEWORK in self.methods:
                # In OSX the Python 3 framework does not have a version
                # number in its name.
                fw = ExtraFrameworkDependency('python', False, None, kwargs)
                if fw.found():
                    self.cargs = fw.get_compile_args()
                    self.libs = fw.get_link_args()
                    self.is_found = True
        if self.is_found:
            mlog.log('Dependency', mlog.bold(self.name), 'found:', mlog.green('YES'))
        else:
            mlog.log('Dependency', mlog.bold(self.name), 'found:', mlog.red('NO'))
def _find_libpy3_windows(self, env):
'''
Find python3 libraries on Windows and also verify that the arch matches
what we are building for.
'''
pyarch = sysconfig.get_platform()
arch = detect_cpu_family(env.coredata.compilers)
if arch == 'x86':
arch = '32'
elif arch == 'x86_64':
arch = '64'
else:
# We can't cross-compile Python 3 dependencies on Windows yet
mlog.log('Unknown architecture {!r} for'.format(arch),
mlog.bold(self.name))
self.is_found = False
return
# Pyarch ends in '32' or '64'
if arch != pyarch[-2:]:
mlog.log('Need', mlog.bold(self.name),
'for {}-bit, but found {}-bit'.format(arch, pyarch[-2:]))
self.is_found = False
return
inc = sysconfig.get_path('include')
platinc = sysconfig.get_path('platinclude')
self.cargs = ['-I' + inc]
if inc != platinc:
self.cargs.append('-I' + platinc)
# Nothing exposes this directly that I coulf find
basedir | |
"""tests dynamic cards and dynamic load cards"""
import unittest
from io import StringIO
import numpy as np
import pyNastran
from pyNastran.bdf.bdf import BDF, read_bdf, CrossReferenceError
from pyNastran.bdf.cards.test.utils import save_load_deck
#ROOT_PATH = pyNastran.__path__[0]
class TestDynamic(unittest.TestCase):
"""
The cards tested are:
* TSTEP
"""
def test_tstep(self):
"""tests a TSTEP card"""
model = BDF(debug=None)
sid = 42
n1 = n2 = 5
dt1 = dt2 = 0.1
no1 = no2 = 3
card = ['TSTEP', sid,
n1, dt1, no1, None, None, None, None, None,
n2, dt2, no2]
model.add_card(card, card[0], comment='tstep comment')
model.validate()
tstep = model.tsteps[42]
tstep.raw_fields()
tstep.write_card()
tstep.write_card(size=16)
sid = 43
N = 5
DT = 0.1
NO = 3
tstep2 = model.add_tstep(sid, N, DT, NO)
tstep2.raw_fields()
tstep2.write_card()
tstep2.write_card(size=16)
save_load_deck(model)
    def test_tstepnl(self):
        """tests a TSTEPNL card (raw card fields and the typed API)"""
        model = BDF(debug=None)
        # Fully-populated TSTEPNL with sid=250, added from raw fields.
        card = ['TSTEPNL', 250, 100, .01, 1, 'ADAPT', 2, 10, 'PW',
                1.E-2, 1.E-3, 1.E-6, 2, 10, 2, .02, None,
                5, 5, 0, 0.75, 16.0, 0.1, 20.,]
        model.add_card(card, card[0], comment='tstepnl comment')
        model.validate()
        tstepnl = model.tstepnls[250]
        tstepnl.raw_fields()
        tstepnl.write_card()
        tstepnl.write_card(size=16)
        # Minimal TSTEPNL built through the typed helper.
        sid = 42
        ndt = 10
        dt = 3.
        no = 5
        tstepnl2 = model.add_tstepnl(sid, ndt, dt, no)
        tstepnl2.raw_fields()
        tstepnl2.write_card()
        tstepnl2.write_card(size=16)
        save_load_deck(model)
def test_delay(self):
"""tests a two field DELAY card"""
model = BDF(debug=False)
node1, c1, t1 = 100, 3, 0.3
node2, c2, t2 = 101, 4, 0.4
sid = 42
card_lines = ['DELAY', sid, node1, c1, t1, node2, c2, t2]
model.add_card(card_lines, card_lines[0], comment='', is_list=True,
has_none=True)
model.add_grid(100, [0., 0., 0.])
model.add_grid(101, [0., 0., 0.])
model.validate()
model.cross_reference()
#print(model.delays[42])
save_load_deck(model)
def test_dphase(self):
"""tests a two field DPHASE card"""
model = BDF(debug=False)
node1, c1, t1 = 100, 3, 0.3
node2, c2, t2 = 101, 4, 0.4
sid = 42
card_lines = ['DPHASE', sid, node1, c1, t1, node2, c2, t2]
model.add_card(card_lines, card_lines[0], comment='', is_list=True,
has_none=True)
model.add_grid(100, [0., 0., 0.])
model.add_grid(101, [0., 0., 0.])
model.validate()
model.cross_reference()
#print(model.dphases[42])
save_load_deck(model)
    def test_freq(self):
        """tests FREQ, FREQ1, FREQ2, FREQ4, FREQ5 cards sharing one sid"""
        model = BDF(debug=False)
        sid = 101
        freqs = 0.1
        freq = model.add_freq(sid, freqs, comment='freq')
        #print(freq)
        freqs = [2.0, 3.0]
        freq = model.add_freq(sid, freqs, comment='freq')
        #print(freq)
        # FREQ1: f1 + i*df for i in 0..ndf -> ndf+1 frequencies.
        f1 = 0.
        df = 2.0
        freq1 = model.add_freq1(sid, f1, df, ndf=5, comment='freq1')
        assert len(freq1.freqs) == 6, 'freqs=%s' % freq1.freqs
        #print(freq1)
        # FREQ2: nf+1 logarithmically spaced points from f1 to f2.
        f1 = 1.
        f2 = 8.0
        freq2 = model.add_freq2(sid, f1, f2, nf=6, comment='freq2')
        assert len(freq2.freqs) == 7, 'freqs=%s' % freq2.freqs
        assert np.allclose(freq2.freqs.max(), f2), freq2.freqs
        #print(freq2)
        freq4 = model.add_freq4(sid, f1, f2, fspread=0.1, nfm=3, comment='freq4')
        #print(model.frequencies[sid])
        #print(freq4)
        fractions = [0., 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
        freq5 = model.add_freq5(sid, fractions, f1=0., f2=100., comment='freq5')
        fractions = np.linspace(0., 1.)
        unused_freq5b = model.add_freq5(sid, fractions, f1=0., f2=100., comment='freq5')
        model.validate()
        # Round-trip every card through raw_fields and both field widths.
        freq.raw_fields()
        freq.write_card()
        freq.write_card(size=16)
        freq1.raw_fields()
        freq1.write_card()
        freq1.write_card(size=16)
        freq2.raw_fields()
        freq2.write_card()
        freq2.write_card(size=16)
        freq4.raw_fields()
        freq4.write_card()
        freq4.write_card(size=16)
        freq5.raw_fields()
        freq5.write_card()
        freq5.write_card(size=16)
        # Write the deck to memory and re-read it to check serialization.
        bdf_file = StringIO()
        model.write_bdf(bdf_file, close=False)
        unused_out = bdf_file.getvalue()
        bdf_file.seek(0)
        model2 = read_bdf(bdf_file, punch=True, debug=False)
        model2.uncross_reference()
        model2.safe_cross_reference()
        model2.uncross_reference()
        save_load_deck(model)
    def test_tload(self):
        """tests DLOAD, TLOAD1, TLOAD2, TABLED2 cards"""
        model = BDF(debug=False)
        model.set_error_storage(nparse_errors=0, stop_on_parsing_error=True,
                                nxref_errors=0, stop_on_xref_error=True)
        sid = 2
        excite_id = 20
        delay = 0
        tid = 42
        # Only the last TLOAD1/TLOAD2 of each group is kept for the checks
        # below; the earlier ones just exercise the different Type/delay forms.
        tload1 = model.add_tload1(sid, excite_id, tid, delay=0, Type='LOAD',
                                  us0=0.0, vs0=0.0, comment='tload1')
        tload1 = model.add_tload1(sid, excite_id, tid, delay=1., Type='DISP',
                                  us0=0.0, vs0=0.0, comment='')
        tload1 = model.add_tload1(sid, excite_id, tid, delay=2, Type='VELO',
                                  us0=0.0, vs0=0.0, comment='')
        tload1 = model.add_tload1(sid, excite_id, tid, delay=0, Type='ACC',
                                  us0=0.0, vs0=0.0, comment='')
        nid = 100
        model.add_grid(nid, [0., 0., 0.])
        # DAREA resolves the TLOAD1 excite_id to a node/component/scale.
        darea_id = excite_id
        component = 4
        scale = 1.
        model.add_darea(darea_id, nid, component, scale, comment='')
        sid = 3
        excite_id = 30
        tload2 = model.add_tload2(sid, excite_id, delay=0, Type='LOAD',
                                  T1=0., T2=None, frequency=0., phase=0.,
                                  c=0., b=0., us0=0., vs0=0., comment='tload2')
        tload2 = model.add_tload2(sid, excite_id, delay=1., Type='D',
                                  T1=0., T2=None, frequency=0., phase=0.,
                                  c=0., b=0., us0=0., vs0=0., comment='')
        tload2 = model.add_tload2(sid, excite_id, delay=2, Type='V',
                                  T1=0., T2=None, frequency=0., phase=0.,
                                  c=0., b=0., us0=0., vs0=0., comment='')
        tload2 = model.add_tload2(sid, excite_id, delay=0, Type='A',
                                  T1=0., T2=1., frequency=0., phase=0.,
                                  c=0., b=0., us0=0., vs0=0., comment='')
        darea_id = excite_id
        component = 4
        scale = 1.
        model.add_darea(darea_id, nid, component, scale, comment='')
        # DELAY referenced by the delay=2 loads above.
        delay_id = 2
        nodes = 100
        components = 2
        delays = 1.5
        delay = model.add_delay(delay_id, nodes, components, delays)
        sid = 1
        scale = 1.0
        scale_factors = 1.
        load_ids = 2
        dload = model.add_dload(sid, scale, scale_factors, load_ids,
                                comment='dload')
        # TABLED2 referenced by the TLOAD1 tid.
        x1 = 0.1
        x = np.linspace(0., 1.)
        y = np.sin(x)
        tabled2 = model.add_tabled2(tid, x1, x, y, comment='tabled2')
        model.pop_parse_errors()
        # Round-trip every card through raw_fields and both field widths.
        delay.validate()
        delay.raw_fields()
        delay.write_card()
        delay.write_card(size=16)
        tload1.validate()
        tload1.raw_fields()
        tload1.write_card()
        tload1.write_card(size=16)
        tload2.validate()
        tload2.raw_fields()
        tload2.write_card()
        tload2.write_card(size=16)
        dload.validate()
        dload.raw_fields()
        dload.write_card()
        dload.write_card(size=16)
        tabled2.validate()
        tabled2.raw_fields()
        tabled2.write_card()
        tabled2.write_card(size=16)
        model.validate()
        model.cross_reference()
        model.pop_xref_errors()
        bdf_file = StringIO()
        model.write_bdf(bdf_file, close=False)
        unused_out = bdf_file.getvalue()
        bdf_file.seek(0)
        unused_outs = model.get_bdf_stats(return_type='list')
        unused_outs = model.get_bdf_stats(return_type='string')
        # Load evaluation: a scalar time gives one value per load...
        time = 0.5
        out1 = tload1.get_load_at_time(time, scale=1.)
        out2 = tload2.get_load_at_time(time, scale=1.)
        #print(out1)
        assert len(out1) == 1, out1
        assert len(out2) == 1, out2
        #print(out1)
        #print(out2)
        # ...and a list of times gives one value per time.
        time = [0.5, 0.9]
        out1 = tload1.get_load_at_time(time, scale=1.)
        out2 = tload2.get_load_at_time(time, scale=1.)
        assert len(out1) == 2, out1
        assert len(out2) == 2, out2
        #print(out1)
        #print(out2)
        model2 = read_bdf(bdf_file, punch=True, debug=False)
        model2.uncross_reference()
        model2.safe_cross_reference()
        model2.uncross_reference()
        #print(out)
        #print(outs)
        save_load_deck(model, run_renumber=False, run_convert=False)
    def test_rload(self):
        """tests DLOAD, RLOAD1, RLOAD2, TABLED2 cards"""
        model = BDF(debug=False)
        #model.case_control_deck = CaseControlDeck(['DLOAD=2', 'BEGIN BULK'])
        sid = 2
        excite_id = 20
        delay = 0
        tid = 42
        # Only the last RLOAD1/RLOAD2 of each group is checked below; the
        # earlier calls exercise the different Type/delay combinations.
        rload1 = model.add_rload1(sid, excite_id, delay=0, dphase=0, tc=0,
                                  td=0, Type='LOAD', comment='rload1')
        rload1 = model.add_rload1(sid, excite_id, delay=1., dphase=0, tc=0,
                                  td=0, Type='DISP', comment='rload1')
        rload1 = model.add_rload1(sid, excite_id, delay=2, dphase=0, tc=0,
                                  td=0, Type='VELO', comment='rload1')
        rload1 = model.add_rload1(sid, excite_id, delay=0, dphase=0, tc=0,
                                  td=0, Type='ACC', comment='rload1')
        sid = 3
        excite_id = 30
        rload2 = model.add_rload2(sid, excite_id, delay=0, dphase=0, tb=0,
                                  tp=0, Type='LOAD', comment='rload2')
        rload2 = model.add_rload2(sid, excite_id, delay=1., dphase=0, tb=0,
                                  tp=0, Type='D', comment='rload2')
        rload2 = model.add_rload2(sid, excite_id, delay=2, dphase=0, tb=0,
                                  tp=0, Type='V', comment='rload2')
        rload2 = model.add_rload2(sid, excite_id, delay=0, dphase=0, tb=0,
                                  tp=0, Type='A', comment='rload2')
        # DAREA cards resolving each excite_id to a grid/component/scale.
        excite_id = 20
        nid = 21
        c = 1
        scale = 1.0
        model.add_darea(excite_id, nid, c, scale, comment='darea')
        model.add_grid(nid, [0., 0., 0.])
        excite_id = 30
        model.add_darea(excite_id, nid, c, scale, comment='darea')
        # DELAY referenced by the delay=2 loads above.
        delay_id = 2
        nodes = 100
        components = 2
        delays = 1.5
        delay = model.add_delay(delay_id, nodes, components, delays)
        sid = 1
        scale = 1.0
        scale_factors = 1.
        load_ids = 2
        dload = model.add_dload(sid, scale, scale_factors, load_ids,
                                comment='dload')
        x1 = 0.1
        x = np.linspace(0., 1.)
        y = np.sin(x)
        tabled2 = model.add_tabled2(tid, x1, x, y, comment='tabled2')
        model.pop_parse_errors()
        # Round-trip every card through raw_fields and both field widths.
        delay.validate()
        delay.raw_fields()
        delay.write_card()
        delay.write_card(size=16)
        rload1.validate()
        rload1.raw_fields()
        rload1.write_card()
        rload1.write_card(size=16)
        rload2.validate()
        rload2.raw_fields()
        rload2.write_card()
        rload2.write_card(size=16)
        dload.validate()
        dload.raw_fields()
        dload.write_card()
        dload.write_card(size=16)
        tabled2.validate()
        tabled2.raw_fields()
        tabled2.write_card()
        tabled2.write_card(size=16)
        model.validate()
        model.cross_reference()
        model.pop_xref_errors()
        #print(model.dareas)
        bdf_file = StringIO()
        model.write_bdf(bdf_file, close=False)
        unused_out = bdf_file.getvalue()
        bdf_file.seek(0)
        unused_outs = model.get_bdf_stats(return_type='list')
        unused_outs = model.get_bdf_stats(return_type='string')
        # A scalar frequency gives one value; a list gives one per frequency.
        freq = 0.5
        out1 = rload1.get_load_at_freq(freq, scale=1.)
        #out2 = rload2.get_load_at_time(freq, scale=1.)
        #print(out1)
        #print(out2)
        assert len(out1) == 1, out1
        #assert len(out2) == 1, out2
        freq = [0.5, 0.9]
        out1 = rload1.get_load_at_freq(freq, scale=1.)
        #out2 = rload2.get_load_at_freq(freq, scale=1.)
        #print(out1)
        #print(out2)
        assert len(out1) == 2, out1
        #assert len(out2) == 2, out2
        model2 = read_bdf(bdf_file, punch=True, debug=False)
        model2.uncross_reference()
        model2.safe_cross_reference()
        model2.uncross_reference()
        #print(out)
        #print(outs)
        save_load_deck(model, run_renumber=False, run_convert=False)
    def test_ascre(self):
        """tests ACSRCE, DELAY, DPHASE, TABLED2"""
        model = BDF(debug=False)
        sid = 1
        excite_id = 2
        rho = 1.0
        b = 2.0
        acsrce = model.add_acsrce(sid, excite_id, rho, b, delay=0, dphase=0, power=0,
                                  comment='acsrce')
        acsrce.raw_fields()
        # Second ACSRCE whose delay/dphase/power reference the cards below.
        sid = 3
        excite_id = 4
        rho = 1.0
        b = 2.0
        delay = 3
        dphase = 4
        power = 5
        unused_acsrce2 = model.add_acsrce(sid, excite_id, rho, b, delay=delay,
                                          dphase=dphase, power=power)
        nodes = 4
        components = 5
        delays = 6.0
        # NOTE(review): 'delay' is rebound here and again below; only the
        # DPHASE object survives in the local name.
        delay = model.add_delay(sid, nodes, components, delays, comment='')
        nodes = 4
        components = 6
        phase_leads = 2.0
        delay = model.add_dphase(sid, nodes, components, phase_leads)
        # TABLED2 providing the power-vs-frequency table (tid == power).
        tid = power
        x1 = 1.
        x = np.linspace(0., 1.) + 10.
        y = np.sin(x) + 2.
        model.add_tabled2(tid, x1, x, y, comment='tabled2')
        model.add_grid(4, [0., 0., 0.])
        model.validate()
        model.pop_parse_errors()
        model.cross_reference()
        model.pop_xref_errors()
        save_load_deck(model, run_convert=False)
def test_nlparm(self):
"""tests NLPARM"""
model = BDF(debug=False)
nlparm_id = 42
model.add_nlparm(nlparm_id, comment='nlparm')
save_load_deck(model)
def test_nlpci(self):
"""tests NLPCI"""
model = BDF(debug=False)
nlpci_id = 42
nlpci = model.add_nlpci(nlpci_id, Type='CRIS', minalr=0.25, maxalr=4.,
scale=0., desiter=12, mxinc=20,
comment='nlpci')
nlpci.raw_fields()
#print(nlpci)
save_load_deck(model)
#def test_rotord(self):
#"""tests ROTORD"""
#model = BDF(debug=False)
#sid = 42
#rstart = 10.0
#rstep = 11.0
#numstep = 10
#rids = []
#rsets = [31]
#rspeeds = [None]
#rcords = []
#w3s = []
#w4s = []
#rforces = []
#brgsets = []
#rotord = model.add_rotord(
#sid, rstart, rstep, numstep,
#rids, rsets, rspeeds, rcords, w3s, w4s, rforces, brgsets,
#refsys='ROT', | |
self.eos_phase_ref.S_dep_l
self.H_dep_T_ref_Pb = self.eos.to_TP(self.T_ref, 101325).H_dep_l
self.S_dep_T_ref_Pb = self.eos.to_TP(self.T_ref, 101325).S_dep_l
if self.Tb:
self.eos_Tb = self.eos.to_TP(self.Tb, 101325)
self.H_dep_Tb_Pb_g = self.eos_Tb.H_dep_g
self.H_dep_Tb_Pb_l = self.eos_Tb.H_dep_l
self.H_dep_Tb_P_ref_g = self.eos.to_TP(self.Tb, self.P_ref).H_dep_g
self.S_dep_Tb_P_ref_g = self.eos.to_TP(self.Tb, self.P_ref).S_dep_g
self.S_dep_Tb_Pb_g = self.eos_Tb.S_dep_g
self.S_dep_Tb_Pb_l = self.eos_Tb.S_dep_l
# if self.Tt and self.Pt:
# self.eos_Tt = self.eos.to_TP(self.Tt, self.Pt)
# self.H_dep_Tt_g = self.eos_Tt.H_dep_g
## self.H_dep_Tt_l = self.eos_Tt.H_dep_l
#
# self.S_dep_Tt_g = self.eos_Tt.S_dep_g
## self.S_dep_Tt_l = self.eos_Tt.S_dep_l
except:
pass
    def calc_H(self, T, P):
        r'''Compute the molar enthalpy [J/mol] at ``T`` relative to the
        reference state, integrating the heat capacity of each phase and
        adding/subtracting the latent heats of every phase change crossed
        on the path from ``phase_ref`` to the current ``self.phase``.

        ``P`` is accepted but unused here; pressure effects are handled
        separately by ``calc_H_excess``.  Returns None if any required
        datum (Cp correlation, Tm, Tb, latent heat) is unavailable.
        '''
        # Phase letter -> Cp integral over T for that phase.
        integrators = {'s': self.HeatCapacitySolid.T_dependent_property_integral,
                       'l': self.HeatCapacityLiquid.T_dependent_property_integral,
                       'g': self.HeatCapacityGas.T_dependent_property_integral}
        try:
            H = self.H_ref
            if self.phase == self.phase_ref:
                # Same phase: a single Cp integral from T_ref to T.
                H += integrators[self.phase](self.T_ref, T)
            elif self.phase_ref == 's' and self.phase == 'l':
                H += self.H_int_T_ref_s_to_Tm + self.Hfusm + integrators['l'](self.Tm, T)
            elif self.phase_ref == 'l' and self.phase == 's':
                H += -self.H_int_l_Tm_to_T_ref_l - self.Hfusm + integrators['s'](self.Tm, T)
            elif self.phase_ref == 'l' and self.phase == 'g':
                H += self.H_int_l_T_ref_l_to_Tb + self.Hvap_Tbm + integrators['g'](self.Tb, T)
            elif self.phase_ref == 'g' and self.phase == 'l':
                H += -self.H_int_Tb_to_T_ref_g - self.Hvap_Tbm + integrators['l'](self.Tb, T)
            elif self.phase_ref == 's' and self.phase == 'g':
                # s->g: melt at Tm, heat the liquid to Tb, vaporize, heat the gas.
                H += self.H_int_T_ref_s_to_Tm + self.Hfusm + self.H_int_l_Tm_to_Tb + self.Hvap_Tbm + integrators['g'](self.Tb, T)
            elif self.phase_ref == 'g' and self.phase == 's':
                H += -self.H_int_Tb_to_T_ref_g - self.Hvap_Tbm - self.H_int_l_Tm_to_Tb - self.Hfusm + integrators['s'](self.Tm, T)
            else:
                raise Exception('Unknown error')
        except:
            # Deliberate best-effort behavior: any missing datum yields None.
            # NOTE(review): the bare except also hides genuine bugs.
            return None
        return H
    def calc_H_excess(self, T, P):
        r'''Pressure-dependent departure-enthalpy correction [J/mol] from the
        equation of state, taken relative to the reference-state departure.
        Returns 0 for phase combinations with no correction available.
        '''
        H_dep = 0
        if self.phase_ref == 'g' and self.phase == 'g':
            H_dep += self.eos.to_TP(T, P).H_dep_g - self.H_dep_ref_g
        elif self.phase_ref == 'l' and self.phase == 'l':
            try:
                # _eos_T_101325 is cached by set_thermo at the current T.
                H_dep += self.eos.to_TP(T, P).H_dep_l - self._eos_T_101325.H_dep_l
            except:
                # Best-effort: skip the correction if the EOS lacks a liquid root.
                H_dep += 0
        elif self.phase_ref == 'g' and self.phase == 'l':
            # Gas reference -> liquid: departure across Tb plus liquid correction.
            H_dep += self.H_dep_Tb_Pb_g - self.H_dep_Tb_P_ref_g
            H_dep += (self.eos.to_TP(T, P).H_dep_l - self._eos_T_101325.H_dep_l)
        elif self.phase_ref == 'l' and self.phase == 'g':
            H_dep += self.H_dep_T_ref_Pb - self.H_dep_ref_l
            H_dep += (self.eos.to_TP(T, P).H_dep_g - self.H_dep_Tb_Pb_g)
        return H_dep
    def calc_S_excess(self, T, P):
        r'''Pressure-dependent departure-entropy correction [J/mol/K] from the
        equation of state, taken relative to the reference-state departure.
        Mirrors ``calc_H_excess``; returns 0 where no correction applies.
        '''
        S_dep = 0
        if self.phase_ref == 'g' and self.phase == 'g':
            S_dep += self.eos.to_TP(T, P).S_dep_g - self.S_dep_ref_g
        elif self.phase_ref == 'l' and self.phase == 'l':
            try:
                # _eos_T_101325 is cached by set_thermo at the current T.
                S_dep += self.eos.to_TP(T, P).S_dep_l - self._eos_T_101325.S_dep_l
            except:
                # Best-effort: skip the correction if the EOS lacks a liquid root.
                S_dep += 0
        elif self.phase_ref == 'g' and self.phase == 'l':
            S_dep += self.S_dep_Tb_Pb_g - self.S_dep_Tb_P_ref_g
            S_dep += (self.eos.to_TP(T, P).S_dep_l - self._eos_T_101325.S_dep_l)
        elif self.phase_ref == 'l' and self.phase == 'g':
            S_dep += self.S_dep_T_ref_Pb - self.S_dep_ref_l
            S_dep += (self.eos.to_TP(T, P).S_dep_g - self.S_dep_Tb_Pb_g)
        return S_dep
def calc_S(self, T, P):
integrators_T = {'s': self.HeatCapacitySolid.T_dependent_property_integral_over_T,
'l': self.HeatCapacityLiquid.T_dependent_property_integral_over_T,
'g': self.HeatCapacityGas.T_dependent_property_integral_over_T}
try:
S = self.S_ref
if self.phase == self.phase_ref:
S += integrators_T[self.phase](self.T_ref, T)
if self.phase in ['l', 'g']:
S += -R*log(P/self.P_ref)
elif self.phase_ref == 's' and self.phase == 'l':
S += self.S_int_T_ref_s_to_Tm + self.Hfusm/self.Tm + integrators_T['l'](self.Tm, T)
elif self.phase_ref == 'l' and self.phase == 's':
S += - self.S_int_l_Tm_to_T_ref_l - self.Hfusm/self.Tm + integrators_T['s'](self.Tm, T)
elif self.phase_ref == 'l' and self.phase == 'g':
S += self.S_int_l_T_ref_l_to_Tb + self.Hvap_Tbm/self.Tb + integrators_T['g'](self.Tb, T) -R*log(P/self.P_ref) # TODO add to other states
elif self.phase_ref == 'g' and self.phase == 'l':
S += - self.S_int_Tb_to_T_ref_g - self.Hvapm/self.Tb + integrators_T['l'](self.Tb, T)
elif self.phase_ref == 's' and self.phase == 'g':
S += self.S_int_T_ref_s_to_Tm + self.Hfusm/self.Tm + self.S_int_l_Tm_to_Tb + self.Hvap_Tbm/self.Tb + integrators_T['g'](self.Tb, T)
elif self.phase_ref == 'g' and self.phase == 's':
S += - self.S_int_Tb_to_T_ref_g - self.Hvap_Tbm/self.Tb - self.S_int_l_Tm_to_Tb - self.Hfusm/self.Tm + integrators_T['s'](self.Tm, T)
else:
raise Exception('Unknown error')
except:
return None
return S
def calculate_TH(self, T, H):
def to_solve(P):
self.calculate(T, P)
return self.H - H
return newton(to_solve, self.P)
def calculate_PH(self, P, H):
def to_solve(T):
self.calculate(T, P)
return self.H - H
return newton(to_solve, self.T)
def calculate_TS(self, T, S):
def to_solve(P):
self.calculate(T, P)
return self.S - S
return newton(to_solve, self.P)
def calculate_PS(self, P, S):
def to_solve(T):
self.calculate(T, P)
return self.S - S
return newton(to_solve, self.T)
    def set_thermo(self):
        r'''Recompute and cache H/S/G (molar and mass basis) at the current
        T and P.  Any failure is swallowed silently, leaving the previous
        values in place.
        '''
        try:
            # Cache the EOS state at (T, 101325 Pa) used by the excess terms.
            self._eos_T_101325 = self.eos.to_TP(self.T, 101325)
            self.Hm = self.calc_H(self.T, self.P)
            # NOTE(review): if calc_H returns None this "+=" raises TypeError,
            # which lands in the bare except and silently aborts the update.
            self.Hm += self.calc_H_excess(self.T, self.P)
            self.H = property_molar_to_mass(self.Hm, self.MW) if (self.Hm is not None) else None
            self.Sm = self.calc_S(self.T, self.P)
            self.Sm += self.calc_S_excess(self.T, self.P)
            self.S = property_molar_to_mass(self.Sm, self.MW) if (self.Sm is not None) else None
            # Gibbs energy on both bases; None propagates from H/S.
            self.G = self.H - self.T*self.S if (self.H is not None and self.S is not None) else None
            self.Gm = self.Hm - self.T*self.Sm if (self.Hm is not None and self.Sm is not None) else None
        except:
            pass
@property
def Um(self):
r'''Internal energy of the chemical at its current temperature and
pressure, in units of [J/mol].
This property requires that :obj:`thermo.chemical.set_thermo` ran
successfully to be accurate.
It also depends on the molar volume of the chemical at its current
conditions.
'''
return self.Hm - self.P*self.Vm if (self.Vm and self.Hm is not None) else None
@property
def U(self):
r'''Internal energy of the chemical at its current temperature and
pressure, in units of [J/kg].
This property requires that :obj:`thermo.chemical.set_thermo` ran
successfully to be accurate.
It also depends on the molar volume of the chemical at its current
conditions.
'''
return property_molar_to_mass(self.Um, self.MW) if (self.Um is not None) else None
@property
def Am(self):
r'''Helmholtz energy of the chemical at its current temperature and
pressure, in units of [J/mol].
This property requires that :obj:`thermo.chemical.set_thermo` ran
successfully to be accurate.
It also depends on the molar volume of the chemical at its current
conditions.
'''
return self.Um - self.T*self.Sm if (self.Um is not None and self.Sm is not None) else None
@property
def A(self):
r'''Helmholtz energy of the chemical at its current temperature and
pressure, in units of [J/kg].
This property requires that :obj:`thermo.chemical.set_thermo` ran
successfully to be accurate.
It also depends on the molar volume of the chemical at its current
conditions.
'''
return self.U - self.T*self.S if (self.U is not None and self.S is not None) else None
### Temperature independent properties - calculate lazily
@property
def charge(self):
r'''Charge of a chemical, computed with RDKit from a chemical's SMILES.
If RDKit is not available, holds None.
Examples
--------
>>> Chemical('sodium ion').charge
1
'''
try:
if not self.rdkitmol:
return charge_from_formula(self.formula)
else:
return Chem.GetFormalCharge(self.rdkitmol)
except:
return charge_from_formula(self.formula)
@property
def rings(self):
r'''Number of rings in a chemical, computed with RDKit from a
chemical's SMILES. If RDKit is not available, holds None.
Examples
--------
>>> Chemical('Paclitaxel').rings
7
'''
try:
return Chem.Descriptors.RingCount(self.rdkitmol)
except:
return None
@property
def aromatic_rings(self):
r'''Number of aromatic rings in a chemical, computed with RDKit from a
chemical's SMILES. If RDKit is not available, holds None.
Examples
--------
>>> Chemical('Paclitaxel').aromatic_rings
3
'''
try:
return Chem.Descriptors.NumAromaticRings(self.rdkitmol)
except:
return None
@property
def rdkitmol(self):
r'''RDKit object of the chemical, without hydrogen. If RDKit is not
available, holds None.
For examples of what can be done with RDKit, see
`their website <http://www.rdkit.org/docs/GettingStartedInPython.html>`_.
'''
if self.__rdkitmol:
return self.__rdkitmol
else:
try:
self.__rdkitmol = Chem.MolFromSmiles(self.smiles)
return self.__rdkitmol
except:
return None
@property
def rdkitmol_Hs(self):
r'''RDKit object of the chemical, with hydrogen. If RDKit is not
available, holds None.
For examples of what can be done with RDKit, see
`their website <http://www.rdkit.org/docs/GettingStartedInPython.html>`_.
'''
if self.__rdkitmol_Hs:
return self.__rdkitmol_Hs
else:
try:
self.__rdkitmol_Hs = Chem.AddHs(self.rdkitmol)
return self.__rdkitmol_Hs
except:
return None
@property
def Hill(self):
r'''Hill formula of a compound. For a description of the Hill system,
see :obj:`thermo.elements.atoms_to_Hill`.
Examples
--------
>>> Chemical('furfuryl alcohol').Hill
'C5H6O2'
'''
if self.__Hill:
return self.__Hill
else:
self.__Hill = atoms_to_Hill(self.atoms)
return self.__Hill
@property
def atom_fractions(self):
r'''Dictionary of atom:fractional occurence of the elements in a
chemical. Useful when performing element balances. For mass-fraction
occurences, see :obj:`mass_fractions`.
Examples
--------
>>> Chemical('Ammonium aluminium sulfate').atom_fractions
{'H': 0.25, 'S': 0.125, 'Al': 0.0625, 'O': 0.5, 'N': 0.0625}
'''
if self.__atom_fractions:
return self.__atom_fractions
else:
self.__atom_fractions = atom_fractions(self.atoms)
return self.__atom_fractions
@property
def mass_fractions(self):
r'''Dictionary of atom:mass-weighted fractional occurence of elements.
Useful when performing mass balances. For atom-fraction occurences, see
:obj:`atom_fractions`.
Examples
--------
>>> Chemical('water').mass_fractions
{'H': 0.11189834407236524, 'O': 0.8881016559276347}
'''
if self.__mass_fractions:
return self.__mass_fractions
else:
self.__mass_fractions = mass_fractions(self.atoms, self.MW)
return self.__mass_fractions
@property
def legal_status(self):
r'''Dictionary of legal status indicators for the chemical.
Examples
--------
>>> pprint(Chemical('benzene').legal_status)
{'DSL': 'LISTED',
'EINECS': 'LISTED',
'NLP': 'UNLISTED',
'SPIN': 'LISTED',
'TSCA': 'LISTED'}
'''
if self.__legal_status:
return self.__legal_status
else:
self.__legal_status = legal_status(self.CAS, Method='COMBINED')
return self.__legal_status
@property
def economic_status(self):
r'''Dictionary of economic status indicators for the chemical.
Examples
--------
>>> pprint(Chemical('benzene').economic_status)
["US public: {'Manufactured': 6165232.1, 'Imported': 463146.474, 'Exported': 271908.252}",
u'1,000,000 - 10,000,000 tonnes per annum',
u'Intermediate Use Only',
'OECD HPV | |
# filename: Java_customization/seleniumActions.py
from cmath import e
from re import X
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from RPA.Desktop import Desktop
from io import BytesIO
from PIL import Image, ImageFile, ImageGrab
import oci
import logging
from selenium.webdriver.support.ui import Select
logger = logging.getLogger(__name__)
desktop = Desktop()
# Environment configuration for the EBS automation proof-of-concept.
chrome_driver_path = 'C:/Github/EBS-Automation-POC/Driver/chromedriver.exe'
url = 'http://winfo106.winfosolutions.com:8035/OA_HTML/AppsLocalLogin.jsp?'
# NOTE(review): credentials hard-coded in source; move to env vars / secret store.
username = 'gchockal'
password = '<PASSWORD>'
# toNavigate = 'Purchasing, Vision Operations (USA)>Requisitions>Requisitions'
toNavigate = 'iProcurement>iProcurement Home Page'  # '>'-separated menu path consumed by create_xpath()
isFrameSwithed = False  # module flag; declared global in ebsSwichFrame but never assigned there
def options():
    """Build and return the ChromeOptions used for every driver session."""
    chrome_opts = webdriver.ChromeOptions()
    # Flags relax security so the legacy EBS pages load without prompts.
    for flag in ('--start-maximized',
                 '--disable-gpu',
                 '--disable-web-security',
                 '--allow-running-insecure-content'):
        chrome_opts.add_argument(flag)
    chrome_opts.add_experimental_option("prefs", {'download.prompt_for_download': True})
    return chrome_opts
def get_xpath(name, tagname):
    """Build an XPath locator for an element.

    ``name`` may be a plain label, or ``'Anchor>Label'`` in which case the
    locator is scoped to nodes following the anchor text. ``tagname`` selects
    the element kind ('link', 'button', 'input', 'radio', 'dropdown',
    'textarea', 'searchField', 'frame', 'b'); anything else matches any
    element by its text.
    """
    parts = name.split('>')
    if len(parts) > 1:
        anchor, name = parts[0], parts[-1]
        prefix = f'//*[text()="{anchor}"]//following::'
    else:
        prefix = '//'
    if tagname == 'link':
        locator = f'a[text()="{name}"]'
    elif tagname == 'button':
        # The Submit button is matched by title because its text varies.
        if name == 'Submit':
            locator = 'button[contains(@title,"Submit")]'
        else:
            locator = f'button[text()="{name}"]'
    elif tagname == 'input':
        locator = f'input[@name="{name}"]'
    elif tagname == 'radio':
        locator = f'*[text()="{name}"]//ancestor::td//input[@type="radio"]'
    elif tagname == 'dropdown':
        locator = f'*[text()="{name}"]//following::select[1]'
    elif tagname == 'textarea':
        locator = f'*[text()="{name}"]//following::textarea[1]'
    elif tagname == 'searchField':
        locator = f'*[text()="{name}"]//following::input[1]'
    elif tagname == 'frame':
        # Popup iframe ids are the label with whitespace stripped, wrapped
        # in 'iframe...Popup'.
        frame_id = 'iframe' + ''.join(name.split()) + 'Popup'
        locator = f'iframe[@id="{frame_id}"]'
    elif tagname == 'b':
        locator = f'b[contains(text(),"{name}")]'
    else:
        locator = f'*[text()="{name}"]'
    return prefix + locator
def create_xpath(navigation):
    """Translate a '>'-separated navigation path into the ordered list of
    XPaths to click, one per menu level.

    Intermediate levels target the expand icon next to the entry; the last
    level targets the entry text itself, with special handling when the leaf
    label repeats an earlier level's label.
    """
    steps = navigation.split('>')
    icon_prefix = '(//div[text()="'
    icon_suffix = '"]/parent::a//img[@title])[1]'
    xpaths = [f"//div[text()='{steps[0]}']"]
    # Intermediate menu levels: click the expander icon.
    for step in steps[1:-1]:
        xpaths.append(icon_prefix + step + icon_suffix)
    leaf = steps[-1]
    if len(steps) > 2 and leaf == steps[-2]:
        # Leaf repeats its parent: take the second matching occurrence.
        xpaths.append(icon_prefix + steps[-2] + '"]//following::div[text()="' + leaf + '"][2])')
    elif len(steps) > 2 and leaf == steps[-3]:
        # Leaf repeats its grandparent: take the first occurrence after the parent.
        xpaths.append(icon_prefix + steps[-2] + '"]//following::div[text()="' + leaf + '"][1])')
    else:
        xpaths.append(icon_prefix + leaf + '"])[1]')
    return xpaths
def save_and_open_jnlp():
    """Drive the browser's download UI with raw keystrokes to save and open
    the JNLP file: confirm the save dialog, open the downloads panel
    (Ctrl+J), launch the newest download, and close the tab.

    Timings are fixed delays tuned for the target machine.
    """
    script = [
        (5, ('enter',)),       # confirm the save-file dialog
        (5, ('ctrl', 'j')),    # open the browser downloads panel
        (5, ('tab',)),
        (0.1, ('tab',)),
        (0.1, ('enter',)),     # open the downloaded JNLP
        (2, ('ctrl', 'w')),    # close the downloads tab
    ]
    for delay, keys in script:
        time.sleep(delay)
        desktop.press_keys(*keys)
    time.sleep(10)  # give Java Web Start time to launch
def take_screeshot_as_jpg(path):
    """
    Capture a full-screen screenshot, save it to ``path``, re-encode it as an
    optimized JPEG, and upload it to an OCI Object Storage bucket.

    Parameters
    ----------
    path : str
        Windows-style destination path; its last four backslash-separated
        components are reused as the object name inside the bucket.

    Raises
    ------
    Exception
        If any step (capture, save, or upload) fails.
    """
    #to be written
    # pass
    try :
        snapshot = ImageGrab.grab()
        snapshot.save(path)
        # Credentials come from the named profile in the local OCI config file.
        config = oci.config.from_file("C:\\oci\\config","WATS_WINFOERP")
        object_storage = oci.object_storage.ObjectStorageClient(config)
        namespace = object_storage.get_namespace().data
        bucket_name = "obj-watsdev01-standard"
        path_name="/".join(path.split("\\")[-4:-1])
        logger.info(f"Uploading to {path_name} in oci")
        object_name="/".join(path.split("\\")[-4:])
        # Raise PIL's encoder buffer so progressive JPEG encoding succeeds.
        ImageFile.MAXBLOCK = 2**20
        imagefile = BytesIO()
        img = Image.open(path)
        img.save(imagefile, "JPEG", quality=80, optimize=True, progressive=True)
        imagedata = imagefile.getvalue()
        obj = object_storage.put_object(
            namespace,
            bucket_name,
            object_name,
            imagedata)
        logger.info(f"Uploaded to {path_name} in oci")
    except Exception as e :
        # NOTE(review): passing a tuple to Exception loses the traceback;
        # consider `raise RuntimeError("file upload failed") from e`.
        raise Exception(e," file upload failed")
# Single shared Chrome session used by every ebs* helper below.
selenium_driver = webdriver.Chrome(executable_path=chrome_driver_path, options=options())
def ebsLogin(url, username, password, path_to_screenhot, screenshot_file_name):
    """Open the EBS login page and sign in, retrying up to three times.

    Raises ``Exception`` wrapping the last error if all attempts fail.
    """
    for attempt in range(1, 4):
        try:
            selenium_driver.get(url)
            user_box = selenium_driver.find_element_by_xpath('//*[@name="usernameField"]')
            pass_box = selenium_driver.find_element_by_xpath('//*[@name="passwordField"]')
            login_button = selenium_driver.find_element_by_xpath('//button[text()="Log In"]')
            user_box.send_keys(username)
            pass_box.send_keys(password)
            # click() returns None; printed only as a progress trace.
            click_result = login_button.click()
            print('Login button clicked ', click_result)
            return
        except Exception as err:
            if attempt == 3:
                raise Exception(err)
def ebsSwitchWindow(name):
    '''Close the current browser tab with Ctrl+W; ``name`` is currently unused.'''
    desktop.press_keys('ctrl','w')
    # to be done generic
    # since we are using two driver so it is not particularly required now
def ebsNavigate(navigation, path_to_screenhot, screenshot_file_name):
    """Click through the '>'-separated EBS menu path, retrying the whole
    sequence up to three times on failure.
    """
    for attempt in range(1, 4):
        try:
            menu_xpaths = create_xpath(navigation)
            print(menu_xpaths)
            for menu_xpath in menu_xpaths:
                print(menu_xpath)
                _wait_until_element_is_found(menu_xpath)
                selenium_driver.find_element_by_xpath(menu_xpath).click()
            time.sleep(2)
            return
        except Exception as err:
            if attempt == 3:
                raise Exception(err)
def openEbs():
    """Save and open the downloaded JNLP to launch the EBS Java forms client.

    Raises
    ------
    Exception
        If the keystroke sequence fails; the original error is chained as
        ``__cause__`` (the old code silently discarded it).
    """
    try:
        save_and_open_jnlp()
    except Exception as e:
        # Chain the root cause instead of dropping `e` on the floor.
        raise Exception('Failed To Open Jnlp after downloading') from e
def ebsClickLink(xpath_param, path_to_screenhot, screenshot_file_name):
    """Click the link labelled by ``xpath_param``, retrying up to 3 times."""
    for attempt in range(1, 4):
        try:
            link_xpath = get_xpath(xpath_param, 'link')
            _wait_until_element_is_found(link_xpath)
            selenium_driver.find_element_by_xpath(link_xpath).click()
            time.sleep(2)
            return
        except Exception as err:
            if attempt == 3:
                raise Exception(err)
def ebsClickButton(xpath_param, path_to_screenhot, screenshot_file_name):
    """Click the first button matching ``xpath_param``, retrying up to 3 times."""
    for attempt in range(1, 4):
        try:
            btn_xpath = get_xpath(xpath_param, 'button')
            _wait_until_element_is_found(btn_xpath)
            matches = selenium_driver.find_elements_by_xpath(btn_xpath)
            print(matches)
            matches[0].click()
            time.sleep(2)
            return
        except Exception as err:
            if attempt == 3:
                raise Exception(err)
def ebsChooseRadioButton(xpath_param, path_to_screenhot, screenshot_file_name):
    """Select the radio button labelled by ``xpath_param``, retrying up to 3 times."""
    for attempt in range(1, 4):
        try:
            radio_xpath = get_xpath(xpath_param, 'radio')
            _wait_until_element_is_found(radio_xpath)
            selenium_driver.find_element_by_xpath(radio_xpath).click()
            time.sleep(2)
            return
        except Exception as err:
            if attempt == 3:
                raise Exception(err)
def ebsSelectDropDown(xpath_param, value, path_to_screenhot, screenshot_file_name):
    """Pick ``value`` in the dropdown labelled by ``xpath_param``; an empty
    value selects the first option. Retries up to 3 times.
    """
    for attempt in range(1, 4):
        try:
            dd_xpath = get_xpath(xpath_param, 'dropdown')
            _wait_until_element_is_found(dd_xpath)
            dropdown = Select(selenium_driver.find_element_by_xpath(dd_xpath))
            if value != '':
                dropdown.select_by_visible_text(value)
            else:
                # Blank value: fall back to the first entry in the list.
                dropdown.select_by_index(0)
            time.sleep(2)
            return
        except Exception as err:
            if attempt == 3:
                raise Exception(err)
def ebsInputTextField(xpath_param, value, path_to_screenhot, screenhot_file_name):
    """Clear then type ``value`` into the input named by ``xpath_param`` and
    TAB out to trigger EBS field validation. Retries up to 3 times.
    """
    for attempt in range(1, 4):
        try:
            field_xpath = get_xpath(xpath_param, 'input')
            _wait_until_element_is_found(field_xpath)
            field = selenium_driver.find_element_by_xpath(field_xpath)
            _clear(field)
            field.send_keys(value)
            field.send_keys(Keys.TAB)  # TAB commits the value in EBS forms
            time.sleep(2)
            return
        except Exception as err:
            if attempt == 3:
                raise Exception(err)
def ebsInputTextEreaField(xpath_param, value, path_to_screenhot, screenhot_file_name):
    """Type ``value`` into the textarea labelled by ``xpath_param``.
    Retries up to 3 times.
    """
    for attempt in range(1, 4):
        try:
            area_xpath = get_xpath(xpath_param, 'textarea')
            _wait_until_element_is_found(area_xpath)
            selenium_driver.find_element_by_xpath(area_xpath).send_keys(value)
            time.sleep(2)
            return
        except Exception as err:
            if attempt == 3:
                raise Exception(err)
def ebsEnterSearchTextField(xpath_param, value, path_to_screenhot, screenshot_file_name):
    """Fill an EBS search (LOV) field: type ``value``, open the search popup,
    pick a supplier in the popup frame, and confirm with the Select button.

    NOTE(review): the retry loop is commented out, the search icon title and
    the supplier radio button are hard-coded, and the deprecated
    ``switch_to_frame`` API is used — this is prototype code.
    """
    trycounter = 0
    # while trycounter < 3 :
    try :
        trycounter += 1
        xpath = get_xpath(xpath_param, 'searchField')
        _wait_until_element_is_found(xpath)
        xpath_field = selenium_driver.find_element_by_xpath(xpath)
        xpath_field.send_keys(value)
        time.sleep(2)
        # Open the LOV popup via the magnifier icon next to the field.
        selenium_driver.find_element_by_xpath(f'{xpath}/following::img[@title="Search: Supplier Name"]').click()
        # xpath_field.send_keys(Keys.TAB)
        # time.sleep(2)
        # print("entering")
        # xpath_field.send_keys(Keys.RETURN)
        # print("entered")
        time.sleep(10)
        # xpath_field.send_keys(Keys.TAB)
        # time.sleep(2)
        xpath_param1 = xpath_param.split('>')[-1]
        # frame = selenium_driver.find_element_by_xpath('//iframe[@id="iframelovPopUp_SupplierOnNonCat" and contains(@style,"visibility: visible")]')
        selenium_driver.switch_to_frame(f'Search and Select: {xpath_param1}') #'iframelovPopUp_SupplierOnNonCat') #//iframe[contains(@style,"visibility: visible")]')
        # selenium_driver.find_element_by_xpath(f'//*[text()=') //*[text()="Search and Select: {xpath_param}"]'
        selenium_driver.find_element_by_xpath('(//*[text()="Advanced Network Devices"]/preceding::input[@type="radio"])[1]').click() # hardcoded as I am not able to find alternate solutions need to discuss
        # selenium_driver.find_element_by_xpath('//*[@id="lovBtnSelect"]').click()
        ebsClickButton('Select', path_to_screenhot, screenshot_file_name)
        selenium_driver.switch_to_default_content()
        # //*[text()="Search and Select: Supplier Name"]
        # take_screeshot_as_jpg(path_to_screenhot + screenshot_file_name)
        trycounter = 3
        time.sleep(2)
    except Exception as e :
        # if trycounter < 3 : continue
        # else :
        # take_screeshot_as_jpg(path_to_screenhot + screenshot_file_name)
        raise Exception(e)
def ebsSwichFrame(xpath_param, path_to_screenhot, screenshot_file_name):
    """Switch the driver context into the popup iframe named by
    ``xpath_param``. Retries up to 3 times.
    """
    global isFrameSwithed  # kept for parity with the module flag (never set here)
    for attempt in range(1, 4):
        try:
            frame_xpath = get_xpath(xpath_param, 'frame')
            _wait_until_element_is_found(frame_xpath)
            print(frame_xpath)
            frame_el = selenium_driver.find_element_by_xpath(frame_xpath)
            print(frame_el)
            selenium_driver.switch_to.frame(frame_el)
            time.sleep(2)
            return
        except Exception as err:
            if attempt == 3:
                raise Exception(err)
def ebsSwitchToDefaultFrame(path_to_screenhot, screenshot_file_name):
    """Return the driver context to the top-level document.
    Retries up to 3 times.
    """
    for attempt in range(1, 4):
        try:
            time.sleep(5)  # let any popup animation settle first
            selenium_driver.switch_to.default_content()
            time.sleep(2)
            return
        except Exception as err:
            if attempt == 3:
                raise Exception(err)
def _wait_until_element_is_found(xpath_field):
    """Poll up to 15 times (1 s apart) for an element matching ``xpath_field``.

    Returns as soon as the element is located; silently gives up after the
    last attempt (callers' own lookups will then raise).
    """
    for _ in range(15):
        try:
            selenium_driver.find_element_by_xpath(xpath_field)
            return
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C still interrupts the poll.
            time.sleep(1)
# Scratch slot shared between ebsCopyTextValue and ebsPasteTextValue.
copiedValue = ''
def ebsCopyTextValue(xpath_param, path_to_screenhot, screenshot_file_name):
    """Scan ``<b>`` elements matching ``xpath_param`` and stash the first
    numeric token of each in the module-level ``copiedValue`` (the last
    matching element wins), printing each captured token.
    """
    global copiedValue  # BUG FIX: without this, assignment created a local
                        # and the shared copiedValue never changed
    trycounter = 0
    while trycounter < 3:
        try:
            # BUG FIX: the counter was never advanced inside the loop, so a
            # persistent failure retried forever instead of three times.
            trycounter += 1
            xpath = get_xpath(xpath_param, 'b')
            _wait_until_element_is_found(xpath)
            xpath_fields = selenium_driver.find_elements_by_xpath(xpath)
            for field in xpath_fields:
                for token in field.text.split():
                    if token.isnumeric():
                        copiedValue = token
                        print(token)
                        break  # only the first numeric token per element
            trycounter = 3
        except Exception as e:
            # BUG FIX: the bare `except:` left `e` undefined, so the re-raise
            # below crashed with NameError instead of reporting the error.
            if trycounter < 3:
                continue
            else:
                raise Exception(e)
def ebsPasteTextValue(xpath_param, path_to_screenhot, screenshot_file_name, value=None):
    """Type a value into the field named by ``xpath_param``; when ``value`` is
    omitted, paste the current module-level ``copiedValue``.

    BUG FIX: the old default ``value=copiedValue`` was evaluated once at
    definition time, so it was permanently '' no matter what
    ebsCopyTextValue captured later. A None sentinel reads the live value.
    """
    if value is None:
        value = copiedValue
    ebsInputTextField(xpath_param, value, path_to_screenhot, screenshot_file_name)
def _clear(xpath_field):
    """Best-effort clear of a text input; failures are logged and ignored so
    the subsequent send_keys can still run.
    """
    try:
        xpath_field.clear()
    except Exception:
        # Narrowed from a bare `except:` — don't swallow KeyboardInterrupt.
        print('clear failed')
# --- Script entry point: log in and walk the iProcurement non-catalog
# --- request flow end to end (screenshot upload calls are disabled).
path_to_screenhot, screenshot_file_name = 'C:\\Github\\EBS-Automation-POC\\Java_customization\\Selenium\\', '1.jpg'
ebsLogin(url, username, password, path_to_screenhot, screenshot_file_name)
# time.sleep(5)
ebsNavigate(toNavigate, path_to_screenhot, screenshot_file_name)
# time.sleep(5)
ebsClickLink('Shop>Non-Catalog Request', path_to_screenhot, screenshot_file_name)
# time.sleep(5)
ebsChooseRadioButton('Category Search>Yes, I already have a specific supplier in mind', path_to_screenhot, screenshot_file_name)
ebsClickButton('Category Search>Next', path_to_screenhot, screenshot_file_name)
# time.sleep(5)
ebsSelectDropDown("Non-Catalog Request>What do you need to request?", 'Goods or Services.I can provide description and Total Amount', path_to_screenhot, screenshot_file_name)
# time.sleep(10)
ebsInputTextEreaField('Non-Catalog Request>Item Description', 'AMC', path_to_screenhot, screenshot_file_name)
ebsInputTextField('Non-Catalog Request>Amount', '1500', path_to_screenhot, screenshot_file_name)
ebsInputTextField('Non-Catalog Request>Category', 'MISC.CONSULTING', path_to_screenhot, screenshot_file_name)
ebsEnterSearchTextField('Find your Supplier>Supplier | |
import numpy as np
import warnings
from stingray.base import StingrayObject
from stingray.gti import check_separate, cross_two_gtis
from stingray.lightcurve import Lightcurve
from stingray.utils import assign_value_if_none, simon, excess_variance, show_progress
from stingray.fourier import avg_cs_from_events, avg_pds_from_events, fftfreq, get_average_ctrate
from stingray.fourier import poisson_level, error_on_averaged_cross_spectrum, cross_to_covariance
from abc import ABCMeta, abstractmethod
# Explicit public API: the spectrum classes exported by `from ... import *`.
__all__ = [
    "VarEnergySpectrum",
    "RmsEnergySpectrum",
    "RmsSpectrum",
    "LagEnergySpectrum",
    "LagSpectrum",
    "ExcessVarianceSpectrum",
    "CovarianceSpectrum",
    "ComplexCovarianceSpectrum",
    "CountSpectrum",
]
def get_non_overlapping_ref_band(channel_band, ref_band):
    """
    Ensures that the ``channel_band`` (i.e. the band of interest) is
    not contained within the ``ref_band`` (i.e. the reference band)

    Parameters
    ----------
    channel_band : iterable of type ``[elow, ehigh]``
        The lower/upper limits of the energies to be contained in the band
        of interest
    ref_band : iterable
        The lower/upper limits of the energies in the reference band

    Returns
    -------
    ref_intervals : iterable
        The channels that are both in the reference band in not in the
        bands of interest

    Examples
    --------
    >>> channel_band = [2, 3]
    >>> ref_band = [[0, 10]]
    >>> new_ref = get_non_overlapping_ref_band(channel_band, ref_band)
    >>> np.allclose(new_ref, [[0, 2], [3, 10]])
    True
    Test this also works with a 1-D ref. band
    >>> new_ref = get_non_overlapping_ref_band(channel_band, [0, 10])
    >>> np.allclose(new_ref, [[0, 2], [3, 10]])
    True
    >>> new_ref = get_non_overlapping_ref_band([0, 1], [[2, 3]])
    >>> np.allclose(new_ref, [[2, 3]])
    True
    """
    band = np.asarray(channel_band)
    reference = np.asarray(ref_band)
    # Accept a 1-D [e0, e1] reference band by promoting it to a 2-D list.
    if len(reference.shape) <= 1:
        reference = np.asarray([reference])
    # Already disjoint: the reference band needs no trimming.
    if check_separate(reference, [band]):
        return np.asarray(reference)
    # Everything outside the channel band, up past the reference band's top.
    upper_edge = np.max([np.max(reference), band[1] + 1])
    complement_of_band = [
        [0, band[0]],
        [band[1], upper_edge],
    ]
    return cross_two_gtis(reference, complement_of_band)
def _decode_energy_specification(energy_spec):
    """Decode the energy specification tuple.

    Parameters
    ----------
    energy_spec : tuple
        ``(emin, emax, nbin, scale)`` where ``scale`` is ``'lin'`` or
        ``'log'``; ``nbin + 1`` edges are produced.

    Returns
    -------
    energies : numpy.ndarray
        An array of lower/upper bin edges for the energy array

    Examples
    --------
    >>> _decode_energy_specification([0, 2, 2, 'lin'])
    Traceback (most recent call last):
    ...
    ValueError: Energy specification must be a tuple
    >>> a = _decode_energy_specification((0, 2, 2, 'lin'))
    >>> np.allclose(a, [0, 1, 2])
    True
    >>> a = _decode_energy_specification((1, 4, 2, 'log'))
    >>> np.allclose(a, [1, 2, 4])
    True
    """
    if not isinstance(energy_spec, tuple):
        raise ValueError("Energy specification must be a tuple")
    scale = energy_spec[-1].lower()
    if scale not in ("lin", "log"):
        raise ValueError("Incorrect energy specification")
    emin, emax, nbin = energy_spec[0], energy_spec[1], energy_spec[2]
    if scale == "log":
        # Geometrically spaced edges between emin and emax.
        return np.logspace(np.log10(emin), np.log10(emax), nbin + 1)
    return np.linspace(emin, emax, nbin + 1)
class VarEnergySpectrum(StingrayObject, metaclass=ABCMeta):
main_array_attr = "energy"
"""
Base class for variability-energy spectrum.
This class is only a base for the various variability spectra, and it's
not to be instantiated by itself.
Parameters
----------
events : :class:`stingray.events.EventList` object
event list
freq_interval : ``[f0, f1]``, floats
the frequency range over which calculating the variability quantity
energy_spec : list or tuple ``(emin, emax, N, type)``
if a ``list`` is specified, this is interpreted as a list of bin edges;
if a ``tuple`` is provided, this will encode the minimum and maximum
energies, the number of intervals, and ``lin`` or ``log``.
Other Parameters
----------------
ref_band : ``[emin, emax``], floats; default ``None``
minimum and maximum energy of the reference band. If ``None``, the
full band is used.
use_pi : bool, default ``False``
Use channel instead of energy
events2 : :class:`stingray.events.EventList` object
event list for the second channel, if not the same. Useful if the
reference band has to be taken from another detector.
return_complex: bool, default False
In spectra that produce complex values, return the whole spectrum.
Otherwise, the absolute value will be returned.
Attributes
----------
events1 : array-like
list of events used to produce the spectrum
events2 : array-like
if the spectrum requires it, second list of events
freq_interval : array-like
interval of frequencies used to calculate the spectrum
energy_intervals : ``[[e00, e01], [e10, e11], ...]``
energy intervals used for the spectrum
spectrum : array-like
the spectral values, corresponding to each energy interval
spectrum_error : array-like
the error bars corresponding to spectrum
energy : array-like
The centers of energy intervals
"""
    def __init__(
        self,
        events,
        freq_interval,
        energy_spec,
        ref_band=None,
        bin_time=1,
        use_pi=False,
        segment_size=None,
        events2=None,
        return_complex=False,
    ):
        # See the class docstring for the full parameter description.
        self.events1 = events
        self.events2 = assign_value_if_none(events2, events)
        # Sets same_events and the common GTIs before anything else uses them.
        self._analyze_inputs()
        # This will be set to True in ComplexCovariance
        self.return_complex = return_complex
        self.freq_interval = freq_interval
        self.use_pi = use_pi
        self.bin_time = bin_time
        # A tuple is a (emin, emax, N, 'lin'|'log') recipe; a list is taken
        # as explicit bin edges.
        if isinstance(energy_spec, tuple):
            energies = _decode_energy_specification(energy_spec)
        else:
            energies = np.asarray(energy_spec)
        # Consecutive edge pairs become the [elow, ehigh] subject bands.
        self.energy_intervals = list(zip(energies[0:-1], energies[1:]))
        self.ref_band = np.asarray(assign_value_if_none(ref_band, [0, np.inf]))
        # Promote a 1-D [e0, e1] reference band to a 2-D list of bands.
        if len(self.ref_band.shape) <= 1:
            self.ref_band = np.asarray([self.ref_band])
        self.segment_size = self.delta_nu = None
        if segment_size is not None:
            self.segment_size = segment_size
            self.delta_nu = 1 / self.segment_size
        # Pre-fill spectrum/spectrum_error with NaN before computing.
        self._create_empty_spectrum()
        if len(events.time) == 0:
            simon("There are no events in your event list!" + "Can't make a spectrum!")
        else:
            # Subclass hook that fills self.spectrum / self.spectrum_error.
            self._spectrum_function()
@property
def energy(self):
"""Give the centers of the energy intervals."""
return np.sum(self.energy_intervals, axis=1) / 2
def _analyze_inputs(self):
"""Make some checks on the inputs and set some internal variable.
If the object of events1 is the same as events2, set `same_events` to True.
This will, for example, tell the methods to use events1 for the subject bands
and events2 for the reference band (useful in deadtime-affected data).
Also, if the event lists are distinct, calculate common GTIs.
"""
events1 = self.events1
events2 = self.events2
common_gti = events1.gti
if events2 is None or events2 is events1:
self.events2 = self.events1
self.same_events = True
else:
common_gti = cross_two_gtis(events1.gti, events2.gti)
self.same_events = False
self.gti = common_gti
def _create_empty_spectrum(self):
"""Allocate the arrays of the output spectrum.
Default value is NaN. This is because most spectral timing products are
prone to numerical errors, and it's more informative to have a default invalid
value rather than something like, e.g., 0 or 1
"""
if self.return_complex:
dtype = complex
else:
dtype = float
self.spectrum = np.zeros(len(self.energy_intervals), dtype=dtype) + np.nan
self.spectrum_error = np.zeros_like(self.spectrum, dtype=dtype) + np.nan
def _get_times_from_energy_range(self, events, erange, use_pi=False):
"""Get event times from the wanted energy range.
Parameters
----------
events : `EventList`
Input event list
erange : [e0, e1]
Energy range in keV
Other parameters
----------------
use_pi : bool, default False
Use the PI channel instead of energies
Returns
-------
out_ev : `EventList`
The filtered event list.
"""
if use_pi:
energies = events.pi
else:
energies = events.energy
mask = (energies >= erange[0]) & (energies < erange[1])
return events.time[mask]
def _get_good_frequency_bins(self, freq=None):
"""Get frequency mask corresponding to the wanted frequency interval
Parameters
----------
freq : `np.array`, default None
The frequency array. If None, it will get calculated from the number
of spectral bins using `np.fft.fftfreq`
Returns
-------
freq_mask : `np.array` of bool
The frequency mask.
"""
if freq is None:
n_bin = np.rint(self.segment_size / self.bin_time)
freq = fftfreq(int(n_bin), self.bin_time)
freq = freq[freq > 0]
good = (freq >= self.freq_interval[0]) & (freq < self.freq_interval[1])
return good
def _construct_lightcurves(
self, channel_band, tstart=None, tstop=None, exclude=True, only_base=False
):
"""
Construct light curves from event data, for each band of interest.
Parameters
----------
channel_band : iterable of type ``[elow, ehigh]``
The lower/upper limits of the energies to be contained in the band
of interest
tstart : float, optional, default ``None``
A common start time (if start of observation is different from
the first recorded event)
tstop : float, optional, default ``None``
A common stop time (if start of observation is different from
the first recorded event)
exclude : bool, optional, default ``True``
if ``True``, exclude the band of interest from the reference band
only_base : bool, optional, default ``False``
if ``True``, only return the light curve of the channel of interest, not
that of the reference band
Returns
-------
base_lc : :class:`Lightcurve` object
The light curve of the channels of interest
ref_lc : :class:`Lightcurve` object (only returned if ``only_base`` is ``False``)
The reference light curve for comparison with ``base_lc``
"""
if self.use_pi:
energies1 = self.events1.pi
energies2 = self.events2.pi
else:
energies2 = self.events2.energy
energies1 = self.events1.energy
gti = cross_two_gtis(self.events1.gti, self.events2.gti)
tstart = assign_value_if_none(tstart, gti[0, 0])
tstop = assign_value_if_none(tstop, gti[-1, -1])
good = (energies1 >= channel_band[0]) & (energies1 < channel_band[1])
base_lc = Lightcurve.make_lightcurve(
| |
if self.stop_btn.cget('state') == 'normal': # Stop button is enabled (program is ok to stop)
self.__stop() # Stops program execution
elif (self.row, self.col) != (None, None): # Checks that a square is selected
if event.char.isnumeric(): # If entered key is a digit
self.__display_number(self.row, self.col, event.char, color='#FC5F17') # Displays digit in canvas
self.reset_btn.config(state=tkinter.NORMAL) # Enables the reset button
if self.col == 8: # If selected cell is in the last column
if self.row != 8: # If the selected cell is not in the last row
self.row, self.col = self.row+1, 0 # Selects first cell of next row
else: # If selected cell is not in the last column
self.col += 1 # Selects next cell across
self.__draw_border()
elif event.keysym == 'BackSpace': # If backspace is pressed
self.__display_number(self.row, self.col, None) # Resets the square
### START/STOP/RESET METHODS
    def __start(self):
        '''Begins the dynamic solving of the grid: snapshots the user-entered
        numbers from the canvas into self.grid, validates the layout, and
        launches the background solver thread.'''
        self.row, self.col = None, None # Resets the currently selected cell row and column
        self.canvas.delete('cursor') # Deletes the previous cursor
        self.grid = [ # Makes a new empty 9x9 grid which will store the user-entered values
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0]
        ]
        # Stores each user-entered number in self.grid
        for ypos, row in enumerate(self.grid): # Goes through each row in the grid
            for xpos, _ in enumerate(row): # Goes through each position in the row
                grid_object = self.canvas.find_withtag((ypos,xpos),) # Gets the grid number object with tag at respective position (row, column)
                value = self.canvas.itemcget(grid_object, 'text') # Gets the value of the specific grid number; 'text' argument specifies we want to extract the text
                # Note that value could be None
                if value: # If the cell is filled in
                    self.grid[ypos][xpos] = int(value)
                else: # If the cell is empty
                    self.grid[ypos][xpos] = 0
        if not self.__validate_selected_grid(): # If the grid is not valid in format
            return None # Returns early
        else: # Grid is valid in format; GRID MAY NOT HAVE ANY SOLUTIONS
            self.__update_grid(self.grid) # Displays the grid
            threading.Thread(target=self.__solver_thread).start() # Initiates the solver thread
    def __solver_thread(self):
        '''Main solver thread that solves self.grid.
        Runs in a background thread so the GUI stays responsive; disables
        conflicting UI while solving and restores it (plus optional autosave)
        when the solve finishes or is interrupted.'''
        self.running = True # Allows the solver thread to run
        self.modify = False # Grid modification feature must be disabled when grid is solving
        self.file_submenu.entryconfig(0, state=tkinter.DISABLED) # Disables the load functionality when program is running
        self.file_submenu.entryconfig(2, state=tkinter.DISABLED) # Disables the save as functionality when program is running
        self.option_submenu.entryconfig(0, state=tkinter.DISABLED) # Disables animations delay setting
        self.start_btn.config(state=tkinter.DISABLED) # Disabled start button until execution is finished
        self.stop_btn.config(state=tkinter.NORMAL) # Enables the stop button until execution is finished
        self.reset_btn.config(state=tkinter.DISABLED) # Disables the reset button until execution is finished
        self.status_bar.config(text='Executing solve.', fg='white') # Updates status bar
        self.loading_bar.start() # Starts the loading bar animation
        self.interrupted = self.__solve_grid() # Solves the grid and returns True (was interrupted) or False (was not interrupted); used for displaying or auto saving
        self.running = False # Program is not running anymore
        if self.solutions: # If at least 1 solution has been found
            self.file_submenu.entryconfig(2, state=tkinter.NORMAL) # Re-enables the save as functionality
        else: # If no solutions have been found
            self.__update_solved_grids() # Updates the solved solutions text widget
        self.option_submenu.entryconfig(0, state=tkinter.NORMAL) # Enables animations delay setting
        self.stop_btn.config(state=tkinter.DISABLED) # Disables stop button at the end of execution
        self.reset_btn.config(state=tkinter.NORMAL) # Enables the reset button
        self.loading_bar.stop() # Stops the loading bar animation
        if not self.interrupted: # Displays all solutions only if it was not interrupted
            self.status_bar.config(text='Execution successful. Please reset grid.', fg='white') # Updates status bar
            if self.autosave.get() and self.solutions: # If autosave is on and at least 1 solution has been found
                self.__save() # Save the results
        else: # If program was interrupted
            self.status_bar.config(text='Execution interrupted. Please reset grid.', fg='white') # Updates status bar
    def __stop(self):
        '''Interrupts the dynamic solving of the grid; the solver thread polls
        self.running and unwinds on its next iteration.'''
        self.running = False # Disallows the solver thread from running
    def __reset(self):
        '''Resets the graphical user interface to its initial state: clears
        solutions, the solutions display and the canvas, re-enables editing,
        and restores the menu/button states.'''
        self.file_submenu.entryconfig(0, state=tkinter.NORMAL) # Enables the load functionality when program is reset
        self.file_submenu.entryconfig(2, state=tkinter.DISABLED) # Disables the save as functionality when program is reset
        self.start_btn.config(state=tkinter.NORMAL) # Re-enables the start button
        self.reset_btn.config(state=tkinter.DISABLED) # Disables the reset ability
        self.solutions = [] # Resets all the found solutions
        self.loaded_grid = None # Forgets the loaded grid
        self.modify = True # Re-enables the modify flag to enable grid modification
        self.row, self.col = None, None # Resets the currently selected cell row and column
        self.canvas.delete('cursor') # Deletes the previous cursor
        self.solved_grids_display.config(state=tkinter.NORMAL) # Temporarily enables widget
        self.solved_grids_display.delete(1.0, 'end') # Clears the entire solved solutions text widget
        self.solved_grids_display.config(state=tkinter.DISABLED) # Disables widget again
        self.__update_grid(self.empty_grid) # Displays the empty grid
        self.status_bar.config(text='Reset complete.', fg='white') # Updates the status bar
### LOGIC HANDLING METHODS
def __solve_grid(self):
'''Solves the grid in self.grid and stores each solution as a list in self.solutions; displays each iteration of the solving algorithm
Returns True if process was interrupted or False if process was not interrupted'''
for ypos, row in enumerate(self.grid): # Goes through each row in the grid
for xpos, position in enumerate(row): # Goes through each position in the row
if position == 0: # Position must be empty
for num in range(1,10): # Tries all numbers from 1 to 9
if self.delay.get(): # If animation is set to be delayed
time.sleep(0.1)
if not self.running: # If it was interrupted
return True # Returns True; it was interrupted
if self.__possible(xpos, ypos, num): # Check if the number is a possible
self.grid[ypos][xpos] = num # Puts possible number in empty space
self.__display_number(ypos, xpos, num)
self.__solve_grid() # Keeps solving
self.grid[ypos][xpos] = 0 # If program reaches here, no further numbers can be put into the grid and the square is reset
self.__display_number(ypos, xpos, None) # Empties the sudoku square
return False # No possible solution has been found for an empty position; Exits function by returning None as it was not interrupted
# If program reaches this point, there are no more empty spaces in the grid and a solution has been found
deepcopy_grid = copy.deepcopy(self.grid) # A copy of the original grid is made
self.solutions.append(deepcopy_grid) # Solution added to list of solutions
self.__update_solved_grids() # Updates the solved solutions text widget
def __possible(self, x, y, n):
'''Returns True or False if a number can fit in a specific position in self.grid
Takes x position, y position, and value of a possible number as arguments'''
# Checks row
for position in self.grid[y]:
if position == n:
return False
# Checks column
for row in self.grid:
if row[x] == n:
return False
# Checks square
ranges = [range(0,3), range(3,6), range(6,9)] # Possible grid ranges
xrange = None # Stores the ranges that x and y are in
yrange = None
for possible_range in ranges:
if x in possible_range:
xrange = possible_range # If x fits in the range, the range is stored
if y in possible_range:
yrange = possible_range # If y fits in the range, the range is stored
for row in self.grid[yrange[0]:yrange[-1]+1]:
for position in row[xrange[0]:xrange[-1]+1]:
if position == n: # Checks every position in the square
return False
return True # No doubles detected
### VALIDATION METHODS
def __validate_selected_grid(self):
'Validates self.grid by making sure the value placement is correct and that at least 17 values have been entered; returns True or False if grid is valid'
count = 0 # Stores the valid grid clue count
for ypos, row in enumerate(self.grid): # Goes through each row in the grid
| |
#!/usr/bin/env python3
import argparse
import tempfile
import logging
import difflib
import glob
import json
import sys
import os
import re
from pprint import pformat
from .counter import PapersForCount, SenateCounter
from .aecdata import CandidateList, SenateATL, SenateBTL, FormalPreferences
from .common import logger
from .results import JSONResults
class SenateCountPost2015:
    """Senate count over the post-2015 ballot format.

    Each raw ballot form holds one preference slot per above-the-line (ATL)
    group followed by one slot per below-the-line (BTL) candidate; a formal
    BTL vote takes precedence over the ATL vote on the same paper.
    """
    # post-2015 counting rules never apply bulk exclusions
    disable_bulk_exclusions = True
    def __init__(self, state_name, get_input_file, **kwargs):
        """Parse every formal preference for `state_name` into papers for the count.

        get_input_file: callable mapping a logical input name to a file path.
        Recognised kwargs: s282_candidates, s282_method, max_ballots,
        remove_candidates, remove_method.
        """
        self.candidates = CandidateList(state_name,
                                        get_input_file('all-candidates'),
                                        get_input_file('senate-candidates'))
        self.tickets_for_count = PapersForCount()
        self.s282_candidates = kwargs.get('s282_candidates')
        self.s282_method = kwargs.get('s282_method')
        self.max_ballots = kwargs['max_ballots'] if 'max_ballots' in kwargs else None
        self.remove_candidates = None
        self.remove_method = kwargs.get('remove_method')
        remove = kwargs.get('remove_candidates')
        if remove:
            # resolve caller-supplied candidate tuples into internal candidate ids
            self.remove_candidates = [self.candidates.get_candidate_id(*t) for t in remove]
        def atl_flow(form):
            # Expand an above-the-line form into an ordered candidate id list:
            # groups are taken in strictly sequential preference order (1, 2, ...)
            # stopping at the first gap or duplicated preference number.
            by_pref = {}
            for pref, group in zip(form, self.candidates.groups):
                if pref is None:
                    continue
                if pref not in by_pref:
                    by_pref[pref] = []
                by_pref[pref].append(group)
            prefs = []
            for i in range(1, len(form) + 1):
                at_pref = by_pref.get(i)
                if not at_pref or len(at_pref) != 1:
                    break
                the_pref = at_pref[0]
                for candidate in the_pref.candidates:
                    candidate_id = candidate.candidate_id
                    prefs.append(candidate_id)
            if not prefs:
                return None
            return prefs
        def btl_flow(form):
            # Expand a below-the-line form; same sequential-preference rule,
            # but a BTL vote needs at least 6 unique preferences to be formal.
            by_pref = {}
            for pref, candidate in zip(form, self.candidates.candidates):
                if pref is None:
                    continue
                if pref not in by_pref:
                    by_pref[pref] = []
                by_pref[pref].append(candidate.candidate_id)
            prefs = []
            for i in range(1, len(form) + 1):
                at_pref = by_pref.get(i)
                if not at_pref or len(at_pref) != 1:
                    break
                candidate_id = at_pref[0]
                prefs.append(candidate_id)
            # must have unique prefs for 1..6, or informal
            if len(prefs) < 6:
                return None
            return prefs
        def resolve_non_s282(atl, btl):
            "resolve the formal form from ATL and BTL forms. BTL takes precedence, if formal"
            return btl_flow(btl) or atl_flow(atl)
        def resolve_s282_restrict_form(atl, btl):
            "resolve the formal form as for resolve_non_s282, but restrict to s282 candidates"
            expanded = btl_flow(btl) or atl_flow(atl)
            # NOTE(review): if both flows return None (informal paper) the next
            # line raises TypeError iterating None -- confirm informal papers
            # cannot reach this method, or use the _with_savings variant.
            restricted = [candidate_id for candidate_id in expanded if candidate_id in self.s282_candidates]
            if len(restricted) == 0:
                return None
            return restricted
        def resolve_remove_candidates(atl, btl, min_candidates):
            "resolve the formal form, removing the listed candidates from eligibiity"
            restricted = None
            btl_expanded = btl_flow(btl)
            if btl_expanded:
                restricted = [candidate_id for candidate_id in btl_expanded if candidate_id not in self.remove_candidates]
                # in 'strict' mode a post-removal BTL form must still have
                # enough preferences, otherwise fall back to the ATL form
                if min_candidates is not None and len(restricted) < min_candidates:
                    restricted = None
            if restricted is None:
                atl_expanded = atl_flow(atl)
                if atl_expanded:
                    restricted = [candidate_id for candidate_id in atl_expanded if candidate_id not in self.remove_candidates]
                    if len(restricted) == 0:
                        restricted = None
            return restricted
        def resolve_s282_restrict_form_with_savings(atl, btl):
            "resolve the formal form as for resolve_non_s282, but restrict to s282 candidates"
            restricted = None
            # if we were formal BTL in a non-s282 count, restrict the form. if at least one
            # preference, we're formal
            btl_expanded = btl_flow(btl)
            if btl_expanded:
                restricted = [candidate_id for candidate_id in btl_expanded if candidate_id in self.s282_candidates]
                if len(restricted) == 0:
                    restricted = None
            # if, before or after restriction, we are not formal BTL, try restricting the ATL form
            if restricted is None:
                atl_expanded = atl_flow(atl)
                if atl_expanded:
                    restricted = [candidate_id for candidate_id in atl_expanded if candidate_id in self.s282_candidates]
                    if len(restricted) == 0:
                        restricted = None
            return restricted
        atl_n = len(self.candidates.groups)
        btl_n = len(self.candidates.candidates)
        assert(atl_n > 0 and btl_n > 0)
        informal_n = 0
        n_ballots = 0
        # choose the resolution strategy from the s282/remove configuration
        resolution_fn = resolve_non_s282
        if self.s282_candidates:
            if self.s282_method == 'restrict_form':
                resolution_fn = resolve_s282_restrict_form
            elif self.s282_method == 'restrict_form_with_savings':
                resolution_fn = resolve_s282_restrict_form_with_savings
            else:
                raise Exception("unknown s282 method: `%s'" % (self.s282_method))
        if self.remove_candidates:
            if self.remove_method == 'relaxed':
                resolution_fn = lambda atl, btl: resolve_remove_candidates(atl, btl, None)
            elif self.remove_method == 'strict':
                resolution_fn = lambda atl, btl: resolve_remove_candidates(atl, btl, 6)
        # the (extremely) busy loop reading preferences and expanding them into
        # forms to be entered into the count
        for raw_form, count in FormalPreferences(get_input_file('formal-preferences')):
            if self.max_ballots and n_ballots >= self.max_ballots:
                break
            atl = raw_form[:atl_n]
            btl = raw_form[atl_n:]
            form = resolution_fn(atl, btl)
            if form is not None:
                self.tickets_for_count.add_ticket(tuple(form), count)
            else:
                informal_n += count
            n_ballots += count
        # slightly paranoid check, but outside the busy loop
        assert(len(raw_form) == atl_n + btl_n)
        if informal_n > 0:
            logger.info("%d ballots are informal and were excluded from the count" % (informal_n))
    def get_papers_for_count(self):
        # expanded forms (with multiplicities) to be fed into the counter
        return self.tickets_for_count
    def get_candidate_ids(self):
        # candidate ids eligible for this count, after s282/removal filtering
        candidate_ids = [c.candidate_id for c in self.candidates.candidates]
        if self.s282_candidates:
            candidate_ids = [t for t in candidate_ids if t in self.s282_candidates]
        if self.remove_candidates:
            candidate_ids = [t for t in candidate_ids if t not in self.remove_candidates]
        return candidate_ids
    def get_parties(self):
        # map party abbreviation -> full party name
        return dict((c.party_abbreviation, c.party_name)
                    for c in self.candidates.candidates)
    def get_candidate_title(self, candidate_id):
        # display name in "Surname, Given name" form
        c = self.candidates.candidate_by_id[candidate_id]
        return "{}, {}".format(c.surname, c.given_name)
    def get_candidate_order(self, candidate_id):
        # position of the candidate on the ballot paper
        return self.candidates.candidate_by_id[candidate_id].candidate_order
    def get_candidate_party(self, candidate_id):
        return self.candidates.candidate_by_id[candidate_id].party_abbreviation
class SenateCountPre2015:
    """Pre-2015 senate count: papers come from group voting tickets (ATL)
    and fully-expressed below-the-line preference forms."""
    disable_bulk_exclusions = False
    def __init__(self, state_name, get_input_file, **kwargs):
        """Load candidates, ATL tickets and BTL preferences, then build the papers."""
        # s282 recounts only exist for the post-2015 data format
        if 's282_recount' in kwargs:
            raise Exception('s282 recount not implemented for pre2015 data')
        self.candidates = CandidateList(state_name,
                                        get_input_file('all-candidates'),
                                        get_input_file('senate-candidates'))
        self.atl = SenateATL(
            state_name,
            get_input_file('group-voting-tickets'),
            get_input_file('first-preferences'))
        self.btl = SenateBTL(get_input_file('btl-preferences'))
        self.tickets_for_count = PapersForCount()
        # enter every ATL and BTL ticket into the count
        for ticket_source in (self.atl, self.btl):
            if ticket_source is None:
                continue
            for form, n in ticket_source.get_tickets():
                self.tickets_for_count.add_ticket(form, n)
    def get_papers_for_count(self):
        """All papers (with multiplicities) to be entered into the count."""
        return self.tickets_for_count
    def get_candidate_ids(self):
        """Candidate ids, in ballot-paper order."""
        return [candidate.candidate_id for candidate in self.candidates.candidates]
    def get_parties(self):
        """Map party abbreviation -> full party name."""
        return {c.party_abbreviation: c.party_name for c in self.candidates.candidates}
    def get_candidate_title(self, candidate_id):
        """Display name, "Surname, Given name", for a candidate."""
        candidate = self.candidates.candidate_by_id[candidate_id]
        return "{}, {}".format(candidate.surname, candidate.given_name)
    def get_candidate_order(self, candidate_id):
        """Ballot-paper ordering index for a candidate."""
        return self.candidates.candidate_by_id[candidate_id].candidate_order
    def get_candidate_party(self, candidate_id):
        """Party abbreviation for a candidate."""
        return self.candidates.candidate_by_id[candidate_id].party_abbreviation
def verify_test_logs(verified_dir, test_log_dir):
    """Compare per-round JSON count logs against archived, verified logs.

    verified_dir: directory holding the known-good round_<n>.json files.
    test_log_dir: directory the current count wrote its round logs into.
    Returns True when every round matches; on success the test logs are
    deleted and test_log_dir removed. Mismatches are logged with a diff.
    """
    test_re = re.compile(r'^round_(\d+)\.json')
    rounds = []
    # Fix: the original reused the name `fname` for this loop variable, the
    # helper function below and the cleanup loop variable, shadowing each in
    # turn; distinct names make the code safe to reorder and read.
    for log_name in os.listdir(verified_dir):
        m = test_re.match(log_name)
        if m:
            rounds.append(int(m.groups()[0]))
    def round_log_path(d, r):
        # path of the round-`r` log file under directory `d`
        return os.path.join(d, 'round_%d.json' % r)
    def get_log(d, r):
        # load a round log; a missing file compares as an empty log
        try:
            with open(round_log_path(d, r)) as fd:
                return json.load(fd)
        except FileNotFoundError:
            return {}
    ok = True
    for idx in sorted(rounds):
        v = get_log(verified_dir, idx)
        t = get_log(test_log_dir, idx)
        if v != t:
            logger.error("Round %d: FAIL" % (idx))
            logger.error("Log should be:")
            logger.error(pformat(v))
            logger.error("Log is:")
            logger.error(pformat(t))
            logger.error("Diff:")
            logger.error(
                '\n'.join(
                    difflib.unified_diff(
                        pformat(v).split('\n'),
                        pformat(t).split('\n'))))
            ok = False
        else:
            logger.debug("Round %d: OK" % (idx))
    if ok and len(rounds) > 0:
        # everything verified: remove the temporary test log directory
        for log_name in os.listdir(test_log_dir):
            if test_re.match(log_name):
                os.unlink(os.path.join(test_log_dir, log_name))
        os.rmdir(test_log_dir)
    return ok
def read_config(config_file):
    """Load and return the JSON configuration stored at `config_file`."""
    with open(config_file) as handle:
        parsed = json.load(handle)
    return parsed
def cleanup_json(out_dir):
    """Delete every .json file directly under `out_dir` (stale count output)."""
    pattern = out_dir + '/*.json'
    for stale in glob.glob(pattern):
        logger.debug("cleanup: removing `%s'" % (stale,))
        os.unlink(stale)
def write_angular_json(config, out_dir):
    """Write count.json, the index of counts consumed by the Angular frontend."""
    json_path = os.path.join(out_dir, 'count.json')
    with open(json_path, 'w') as fd:
        summary = {
            'title': config['title'],
            'counts': [
                {
                    'name': count['name'],
                    'state': count['state'],
                    'description': count['description'],
                    'path': count['shortname'],
                }
                for count in config['count']
            ],
        }
        json.dump(summary, fd, sort_keys=True, indent=4, separators=(',', ': '))
def get_data(input_cls, base_dir, count, **kwargs):
    """Instantiate `input_cls` for one count, resolving its AEC input files
    relative to `base_dir`; extra kwargs are passed through unchanged."""
    data_files = count['aec-data']
    def input_file(name):
        # map a logical input name to its on-disk path
        return os.path.join(base_dir, data_files[name])
    return input_cls(count['state'], input_file, **kwargs)
class AutomationException(Exception):
    """Raised when scripted tie-break automation cannot answer a question."""
class Automation:
    """Replays archived answers to tie-break questions posed by SenateCounter."""
    def __init__(self, name, automation_data, count_data):
        """
        name: name of this automation instance, for logging
        automation_data: list of questions and responses [ [ question, response ], ... ]
        count_data: count backend, used to render candidate titles in log messages
        """
        self._name = name
        self._data = automation_data
        self._count_data = count_data
        self._upto = 0  # index of the next unconsumed [question, response] pair
    def _qstr(self, question):
        "we need to cope with a list, or a list of lists"
        parts = []
        for entry in question:
            if type(entry) is list:
                # nested choice: render it recursively
                parts.append(self._qstr(entry))
            else:
                parts.append('"%s"<%d>' % (self._count_data.get_candidate_title(entry), entry))
        return ', '.join(parts)
    def create_callback(self):
        """
        create a callback, suitable to be passed to SenateCounter
        """
        def __callback(question_posed):
            logger.debug("%s: asked to choose between: %s" % (self._name, self._qstr(question_posed)))
            # Bug fix: compare against the number of archived answers, not the
            # list object itself -- `self._upto == self._data` was always False,
            # so exhaustion raised IndexError instead of AutomationException.
            if self._upto == len(self._data):
                logger.error("%s: out of automation data, requested to pick between %s" % (self._name, self._qstr(question_posed)))
                raise AutomationException("out of automation data")
            question_archived, answer = self._data[self._upto]
            if question_archived != question_posed:
                # mismatches are logged but not fatal; the archived answer is still applied
                logger.error("%s: automation data mismatch, expected question `%s', got question `%s'" % (self._name, self._qstr(question_archived), self._qstr(question_posed)))
            resp = question_posed.index(answer)
            self._upto += 1
            return resp
        return __callback
    def check_complete(self):
        """Return True when every archived answer was consumed during the count."""
        if self._upto != len(self._data):
            logger.error("%s: not all automation data was consumed (upto %d/%d)" % (self._name, self._upto, len(self._data)))
            return False
        return True
def json_count_path(out_dir, shortname):
    """Return the output JSON path for the count with the given shortname."""
    return os.path.join(out_dir, '%s.json' % shortname)
def get_outcome(count, count_data, base_dir, out_dir):
    """Run a single senate count and write its JSON results.

    count: configuration dict for this count (name, shortname, vacancies,
        tie-break automation data, optional 'verified' log directory).
    count_data: a SenateCountPre2015 / SenateCountPost2015 instance.
    Returns (output_json_path, summary). Exits the process when automation
    data is left unconsumed or verification against archived logs fails.
    """
    test_logs_okay = True
    test_log_dir = None
    if 'verified' in count:
        # round-by-round logs go to a temp dir, compared to archives below
        test_log_dir = tempfile.mkdtemp(prefix='dividebatur_tmp')
        logger.debug("test logs are written to: %s" % (test_log_dir))
    outf = json_count_path(out_dir, count['shortname'])
    logger.info("counting `%s'. output written to `%s'" % (count['name'], outf))
    result_writer = JSONResults(
        outf,
        test_log_dir,
        count_data.get_candidate_ids(),
        count_data.get_parties(),
        count_data.get_candidate_order,
        count_data.get_candidate_title,
        count_data.get_candidate_party,
        name=count.get('name'),
        description=count.get('description'),
        house=count['house'],
        state=count['state'])
    disable_bulk_exclusions = count.get('disable_bulk_exclusions', count_data.disable_bulk_exclusions)
    logger.debug("disable bulk exclusions: %s" % (disable_bulk_exclusions))
    # scripted answers for the three kinds of tie the counter can encounter
    election_order_auto = Automation('election order', count['election_order_ties'], count_data)
    exclusion_tie_auto = Automation('exclusion tie', count['exclusion_ties'], count_data)
    election_tie_auto = Automation('election tie', count['election_ties'], count_data)
    counter = SenateCounter(
        result_writer,
        count['vacancies'],
        count_data.get_papers_for_count(),
        election_order_auto.create_callback(),
        exclusion_tie_auto.create_callback(),
        election_tie_auto.create_callback(),
        count_data.get_candidate_ids(),
        count_data.get_candidate_order,
        disable_bulk_exclusions)
    counter.run()
    # every archived tie-break answer must have been used, or the config is stale
    if any(not t.check_complete() for t in (election_order_auto, exclusion_tie_auto, election_tie_auto)):
        logger.error("** Not all automation data consumed. Failed. **")
        sys.exit(1)
    if test_log_dir is not None:
        if not verify_test_logs(os.path.join(base_dir, count['verified']), test_log_dir):
            test_logs_okay = False
    if not test_logs_okay:
        logger.error("** TESTS FAILED **")
        sys.exit(1)
    return (outf, result_writer.summary())
def get_input_method(format):
# determine | |
"""Module that contains tests to check that xml file has been written"""
import os
from pathlib import Path
from conftest import get_jarvis4se, remove_xml_file
from xml_adapter import XmlParser3SE
jarvis4se = get_jarvis4se()  # shared jarvis magic-cell runner used by every test below
xml_parser = XmlParser3SE()  # parser reused to read back the xml files the tests generate
def test_generate_xml_file_template():
    """Check that a bare "with <file>" cell writes the empty xml skeleton.
    Notebook equivalent:
    %%jarvis
    with generate_xml_file_template
    """
    file_name = "generate_xml_file_template"
    jarvis4se.jarvis("", f"with {file_name}\n")
    path = Path(os.path.join("./", file_name + ".xml"))
    # Bug fix: read the file directly -- the previous `with path as file:`
    # relied on pathlib's no-op context manager, deprecated since Python 3.9
    # and removed in Python 3.13.
    read_xml = path.read_text(encoding="utf-8")
    base_xml = "<?xml version='1.0' encoding='UTF-8'?>\n" \
               "<systemAnalysis>\n" \
               "  <funcArch>\n" \
               "    <functionList/>\n" \
               "    <dataList/>\n" \
               "    <stateList/>\n" \
               "    <transitionList/>\n" \
               "    <functionalElementList/>\n" \
               "    <functionalInterfaceList/>\n" \
               "  </funcArch>\n" \
               "  <phyArch>\n" \
               "    <physicalElementList/>\n" \
               "    <physicalInterfaceList/>\n" \
               "  </phyArch>\n" \
               "  <viewPoint>\n" \
               "    <viewList/>\n" \
               "    <attributeList/>\n" \
               "    <typeList/>\n" \
               "  </viewPoint>\n" \
               "</systemAnalysis>\n"
    assert base_xml in read_xml
    remove_xml_file(file_name)
def test_simple_function_within_xml():
    """Notebook equivalent:
    %%jarvis
    with simple_function_within_xml
    F1 is a function
    """
    file_name = "simple_function_within_xml"
    jarvis4se.jarvis("", f"with {file_name}\n"
                         "F1 is a function\n")
    function_list = xml_parser.parse_xml(file_name + ".xml")['xml_function_list']
    assert len(function_list) == 1
    # Bug fix: `assert [fun.name == "F1" for fun in function_list]` asserted a
    # non-empty list (always truthy) and so could never fail; assert the
    # condition itself with all().
    assert all(fun.name == "F1" for fun in function_list)
    remove_xml_file(file_name)
def test_described_attribute_within_xml(attribute_cell):
    """Same as test_described_attribute_input() within test_input_cell.py, but here we are
    verifying that attributes are written correctly within xml:
    %%jarvis
    with described_attribute_within_xml
    F1 is a function
    Fun elem is a functional element
    ========================================
    %%jarvis
    with described_attribute_within_xml
    A is an attribute
    B is an attribute. C is an attribute
    ========================================
    %%jarvis
    with described_attribute_within_xml
    The A of F1 is 4,2
    The C of F1 is pink
    The B of Fun elem is 8,5.
    The A of Fun elem is 100
    """
    file_name = "described_attribute_within_xml"
    # three successive jarvis cells build up the same xml file
    jarvis4se.jarvis("", f"with {file_name}\n{attribute_cell[0]}")
    jarvis4se.jarvis("", f"with {file_name}\n{attribute_cell[1]}")
    jarvis4se.jarvis("", f"with {file_name}\n{attribute_cell[2]}")
    obj_dict = xml_parser.parse_xml(file_name + ".xml")
    # 3 attributes (A, B, C) describing 4 (attribute, object, value) pairs
    expected = {('A', 'F1', '4,2'), ('B', 'Fun elem', '8,5'),
                ('C', 'F1', 'pink'), ('A', 'Fun elem', '100')}
    # xml_adapter.parse_xml() returns mainly set(), so the order can change
    # thus we have to compare it with a set also
    result = set()
    assert len(obj_dict['xml_attribute_list']) == 3
    for attribute in obj_dict['xml_attribute_list']:
        for item in attribute.described_item_list:
            # resolve the described item id against both functions and fun elems
            for function in obj_dict['xml_function_list']:
                if item[0] == function.id:
                    result.add((attribute.name, function.name, item[1]))
            for fun_elem in obj_dict['xml_fun_elem_list']:
                if item[0] == fun_elem.id:
                    result.add((attribute.name, fun_elem.name, item[1]))
    assert expected == result
    remove_xml_file(file_name)
def test_set_attribute_type_within_xml():
    """Tests that attribute types are written correctly within xml, notebook equivalent:
    %%jarvis
    with set_attribute_type_within_xml
    A is an attribute
    B is an attribute.
    The type of A is attribute type A.
    The type of B is attribute type B
    """
    file_name = "set_attribute_type_within_xml"
    cell_body = ("A is an attribute\n"
                 "B is an attribute.\n"
                 "The type of A is attribute type A.\n"
                 "The type of B is attribute type B\n")
    jarvis4se.jarvis("", f"with {file_name}\n" + cell_body)
    attribute_list = xml_parser.parse_xml(file_name + ".xml")['xml_attribute_list']
    assert len(attribute_list) == 2
    # parse_xml() hands back sets, so compare (name, type) pairs order-independently
    observed = {(attribute.name, attribute.type) for attribute in attribute_list}
    assert observed == {('A', 'attribute type A'), ('B', 'attribute type B')}
    remove_xml_file(file_name)
def test_set_allocated_item_to_view_within_xml(allocation_item_cell):
    """Relative to Issue #9 to add new allocated item to a view(i.e. filter) by verifying than
    it's written within xml. Notebook equivalent:
    %%jarvis
    with set_allocated_item_to_view_within_xml
    F1 is a function
    F2 with a long name is a function. The alias of F2 with a long name is F2.
    F3 is a function
    F4 is a function
    a is a data
    Fun_elem is a functional element
    ========================================
    %%jarvis
    with set_allocated_item_to_view_within_xml
    under toto
    consider F1. consider toto. consider a, Fun_elem
    consider tata.
    consider F1, F2, F3, F4
    """
    file_name = "set_allocated_item_to_view_within_xml"
    jarvis4se.jarvis("", f"with {file_name}\n{allocation_item_cell[0]}")
    jarvis4se.jarvis("", f"with {file_name}\n{allocation_item_cell[1]}")
    obj_dict = xml_parser.parse_xml(file_name + ".xml")
    expected = {'F1', 'F2 with a long name', 'F3', 'F4', 'a', 'Fun_elem'}
    # xml_adapter.parse_xml() returns mainly set(), so the order can change
    # thus we have to compare it with a set also
    result = set()
    assert len(obj_dict['xml_view_list']) == 1
    # NOTE(review): the docstring shows "under toto" but the assertion expects a
    # view named "test_view" -- confirm against the allocation_item_cell fixture
    assert "test_view" in {i.name for i in obj_dict['xml_view_list']}
    for item in next(iter(obj_dict['xml_view_list'])).allocated_item_list:
        # resolve each allocated id against functions, functional elements and data
        for fun in obj_dict['xml_function_list']:
            if item == fun.id:
                result.add(fun.name)
        for fun_elem in obj_dict['xml_fun_elem_list']:
            if item == fun_elem.id:
                result.add(fun_elem.name)
        for data in obj_dict['xml_data_list']:
            if item == data.id:
                result.add(data.name)
    assert expected == result
    remove_xml_file(file_name)
def test_function_with_grandkids_within_xml(function_grandkids_cell):
    """See Issue #31, Notebook equivalent:
    %%jarvis
    with function_with_grandkids_within_xml
    F1 is a function
    F1a is a function
    F1a1 is a function
    F1 is composed of F1a
    F1a is composed of F1a1
    a is a data
    F1a produces a
    b is a data
    F1a consumes b
    c is a data
    F1a1 produces c
    d is a data
    F1a1 consumes d
    """
    file_name = "function_with_grandkids_within_xml"
    jarvis4se.jarvis("", f"with {file_name}\n{function_grandkids_cell}")
    obj_dict = xml_parser.parse_xml(file_name + ".xml")
    expected_cons = {('b', 'F1a'), ('d', 'F1'), ('b', 'F1'), ('d', 'F1a'), ('d', 'F1a1')}
    expected_prod = {('c', 'F1a1'), ('a', 'F1'), ('c', 'F1'), ('c', 'F1a'), ('a', 'F1a')}
    expected_child = {('F1', 'F1a'), ('F1a', 'F1a1')}
    # xml_adapter.parse_xml() returns mainly set(), so the order can change
    # thus we have to compare it with a set also
    result_cons = set()
    result_prod = set()
    result_child = set()
    assert len(obj_dict['xml_data_list']) == 4 and len(obj_dict['xml_function_list']) == 3
    # Bug fix: the old `assert (len(consumers) and len(producers)) == 5` only
    # compared the producer count with 5 (`and` yields its second operand);
    # check both list lengths explicitly.
    assert len(obj_dict['xml_consumer_function_list']) == 5
    assert len(obj_dict['xml_producer_function_list']) == 5
    for cons in obj_dict['xml_consumer_function_list']:
        result_cons.add((cons[0], cons[1].name))
    for prod in obj_dict['xml_producer_function_list']:
        result_prod.add((prod[0], prod[1].name))
    for fun in obj_dict['xml_function_list']:
        if fun.child_list:
            # record (parent, child) composition pairs
            for child in fun.child_list:
                result_child.add((fun.name, child.name))
    assert expected_cons == result_cons
    assert expected_prod == result_prod
    assert expected_child == result_child
    remove_xml_file(file_name)
def test_function_childs_cons_prod_within_xml(function_with_childs_cell):
    """See Issue #5, Notebook equivalent:
    %%jarvis
    with function_childs_cons_prod_within_xml
    F1 is a function
    F1a is a function
    F1b is a function
    F1c is a function
    F1d is a function
    F1e is a function
    F2 is a function
    F3 is a function
    F1 is composed of F1a
    F1 is composed of F1b
    F1 is composed of F1c
    F1 is composed of F1d
    F1 is composed of F1e
    a is a data
    F1 produces a
    F2 consumes a
    F1a produces a
    F1b consumes a
    b is a data
    F1c produces b
    F1d consumes b
    c is a data
    F3 produces c
    F1e consumes c
    """
    file_name = "function_childs_cons_prod_within_xml"
    jarvis4se.jarvis("", f"with {file_name}\n"
                         f"{function_with_childs_cell}")
    obj_dict = xml_parser.parse_xml(file_name + ".xml")
    expected_cons = {('a', 'F1b'), ('b', 'F1d'), ('a', 'F2'), ('c', 'F1e'), ('c', 'F1')}
    expected_prod = {('b', 'F1c'), ('c', 'F3'), ('a', 'F1a'), ('a', 'F1')}
    expected_child = {('F1', 'F1e'), ('F1', 'F1d'), ('F1', 'F1c'), ('F1', 'F1b'), ('F1', 'F1a')}
    # xml_adapter.parse_xml() returns mainly set(), so the order can change
    # thus we have to compare it with a set also
    result_cons = set()
    result_prod = set()
    result_child = set()
    assert len(obj_dict['xml_data_list']) == 3 and len(obj_dict['xml_function_list']) == 8
    assert len(obj_dict['xml_consumer_function_list']) == 5 and \
           len(obj_dict['xml_producer_function_list']) == 4
    for cons in obj_dict['xml_consumer_function_list']:
        result_cons.add((cons[0], cons[1].name))
    for prod in obj_dict['xml_producer_function_list']:
        result_prod.add((prod[0], prod[1].name))
    for fun in obj_dict['xml_function_list']:
        if fun.child_list:
            # record (parent, child) composition pairs
            for child in fun.child_list:
                result_child.add((fun.name, child.name))
    assert expected_cons == result_cons
    assert expected_prod == result_prod
    assert expected_child == result_child
    remove_xml_file(file_name)
def test_functional_interface_within_xml():
    """Notebook equivalent:
    %%jarvis
    with functional_interface_within_xml
    Color is an attribute
    A is a data
    F1 is a function
    F2 is a function
    Fun_elem_1 is a functional element
    Fun_elem_2 is a functional element
    F1 produces A
    F2 consumes A
    Fun_elem_1 allocates F1
    Fun_elem_2 allocates F2
    Fun_inter is a functional interface.
    The type of Fun_inter is functional interface
    The alias of Fun_inter is FI
    The Color of Fun_inter is pink
    Fun_elem_1 exposes Fun_inter
    Fun_elem_2 exposes Fun_inter
    Fun_inter allocates A.
    """
    file_name = "functional_interface_within_xml"
    jarvis4se.jarvis("", f"with {file_name}\n"
                         "Color is an attribute\n"
                         "A is a data\n"
                         "F1 is a function\n"
                         "F2 is a function\n"
                         "Fun_elem_1 is a functional element\n"
                         "Fun_elem_2 is a functional element\n"
                         "F1 produces A\n"
                         "F2 consumes A\n"
                         "Fun_elem_1 allocates F1\n"
                         "Fun_elem_2 allocates F2\n"
                         "Fun_inter is a functional interface.\n"
                         "The type of Fun_inter is functional interface\n"
                         "The alias of Fun_inter is FI\n"
                         "The Color of Fun_inter is pink\n"
                         "Fun_elem_1 exposes Fun_inter\n"
                         "Fun_elem_2 exposes Fun_inter\n"
                         "Fun_inter allocates A.\n")
    obj_dict = xml_parser.parse_xml(file_name + ".xml")
    # chained comparison: all three lengths must be equal, then (True == 1) holds
    assert (len(obj_dict['xml_data_list']) == len(obj_dict['xml_attribute_list']) ==
            len(obj_dict['xml_fun_inter_list'])) == 1
    data = obj_dict['xml_data_list'].pop()
    fun_inter = obj_dict['xml_fun_inter_list'].pop()
    attribute = obj_dict['xml_attribute_list'].pop()
    assert data.name == 'A'
    assert fun_inter.name == 'Fun_inter'
    assert fun_inter.alias == 'FI'
    assert fun_inter.type == 'Functional interface'
    assert attribute.name == 'Color'
    described_item = attribute.described_item_list.pop()
    # the attribute entry is an (object id, value) pair
    assert described_item[0] == fun_inter.id and described_item[1] == 'pink'
    assert fun_inter.allocated_data_list.pop() == data.id
    remove_xml_file(file_name)
def test_fun_elem_exposes_interface_within_xml(fun_elem_exposing_cell):
"""Notebook equivalent:
%%jarvis
with fun_elem_exposes_interface_within_xml
Fun_inter is a functional interface
Fun_elem is | |
# clients/utils/geometry.py
import copy
import json
import re
from bisect import bisect_left
from decimal import Decimal
from itertools import product
from math import cos, fabs, radians, sqrt
from parserutils.numbers import is_number
from pyproj import Proj, transform
from pyproj.exceptions import CRSError
from ..exceptions import BadExtent, BadSpatialReference
EXTENT_KEYS = ("xmin", "ymin", "xmax", "ymax")
SPATIAL_REF_KEYS = ("wkid", "wkt", "srs")
# Web Mercator (EPSG:3857) world bounds, and WGS84 bounds; the "corrected"
# WGS84 extent is clamped to the latitudes representable in Web Mercator.
GLOBAL_EXTENT_WEB_MERCATOR = (-20037508.342789244, -20037342.166152496, 20037508.342789244, 20037342.16615247)
GLOBAL_EXTENT_WGS84 = (-180.0, -90.0, 180.0, 90.0)
GLOBAL_EXTENT_WGS84_CORRECTED = (-180.0, -85.0511, 180.0, 85.0511)
# Bug fix: raw string so "\(" is a regex escape, not an invalid string escape
# (non-raw "\(" emits SyntaxWarning on modern Python).
SQL_BOX_REGEX = re.compile(r"BOX\((.*) (.*),(.*) (.*)\)")
def extract_significant_digits(number):
    """Round `number` to roughly two significant figures, preserving its sign.

    Whole-number results are returned as int, fractional results as float.
    """
    negative = number < 0
    magnitude = -number if negative else number
    # choose the decimal position to round at, based on magnitude
    if 0.1 <= magnitude < 10:
        decimals = 1
    elif 10 <= magnitude < 100:
        decimals = 0
    elif magnitude >= 100:
        decimals = -1
    else:
        decimals = 0
    rounded = round(magnitude, decimals)
    if decimals < 1:
        # whole number: drop the trailing .0
        rounded = int(rounded)
    return -rounded if negative else rounded
def union_extent(extents):
    """Return a new Extent covering every extent given, or None when there is
    nothing to union. Raises ValueError if a truthy entry is not an Extent."""
    valid = [ext for ext in (extents or "") if ext]
    if not valid:
        return None
    if any(not isinstance(ext, Extent) for ext in valid):
        extent_types = ", ".join(type(e).__name__ for e in valid)
        raise ValueError(f'Invalid extent type: expected Extent, got "{extent_types}"')
    merged = valid[0].clone()
    for other in valid[1:]:
        merged.xmin = min(merged.xmin, other.xmin)
        merged.ymin = min(merged.ymin, other.ymin)
        merged.xmax = max(merged.xmax, other.xmax)
        merged.ymax = max(merged.ymax, other.ymax)
    return merged
class Extent(object):
""" Provides easy handling of extent through various functions below, and abstract out ESRI / WMS differences """
    def __init__(self, extent, spatial_reference=None):
        """Build an extent from a JSON string, dict, sequence or extent-like object.

        extent: dict with xmin/ymin/xmax/ymax keys, an (xmin, ymin, xmax, ymax)
            sequence, an object exposing those attributes, or a JSON string of
            such a dict. Coordinates are coerced to float.
        spatial_reference: optional; when omitted it is read from `extent`
            itself if present. Required one way or the other.
        Raises BadExtent or BadSpatialReference on invalid input.
        """
        self.xmin = None
        self.ymin = None
        self.xmax = None
        self.ymax = None
        self._original_format = None  # remembers the input shape for as_original()
        if isinstance(extent, str):
            # JSON string: decode, then fall through to the dict branch below
            extent = json.loads(extent)
        if isinstance(extent, dict):
            self._original_format = "dict"
            try:
                for key in EXTENT_KEYS:
                    setattr(self, key, extent[key])
            except KeyError:
                raise BadExtent("Invalid extent: missing required keys", extent=extent)
            if spatial_reference is None:
                # accept both ESRI ("spatialReference") and snake_case keys
                spatial_reference = extent.get("spatialReference", extent.get("spatial_reference"))
        elif isinstance(extent, (list, tuple)):
            self._original_format = "list"
            try:
                self.xmin = extent[0]
                self.ymin = extent[1]
                self.xmax = extent[2]
                self.ymax = extent[3]
            except IndexError:
                raise BadExtent("Invalid extent: insufficient length", extent=extent)
        elif all(hasattr(extent, prop) for prop in EXTENT_KEYS):
            self._original_format = "obj"
            for key in EXTENT_KEYS:
                setattr(self, key, getattr(extent, key))
            if spatial_reference is None:
                spatial_reference = getattr(extent, "spatial_reference", None)
        else:
            extent_type = type(extent).__name__
            raise BadExtent(
                f"Invalid extent: must be dict, tuple or compatible object, not {extent_type}",
                extent=extent
            )
        if any(not is_number(coord) for coord in (self.xmin, self.ymin, self.xmax, self.ymax)):
            raise BadExtent("Invalid extent coordinates", extent=extent)
        else:
            # normalize all four coordinates to float
            self.xmin, self.ymin, self.xmax, self.ymax = (
                float(self.xmin),
                float(self.ymin),
                float(self.xmax),
                float(self.ymax)
            )
        if spatial_reference is None:
            raise BadSpatialReference("Spatial reference required for Extent", extent=extent)
        else:
            self.spatial_reference = SpatialReference(spatial_reference)
    def __repr__(self):
        # debug representation: the ESRI-style JSON rendering of the extent
        return self.as_json_string()
    def clone(self):
        """Return a deep copy of this extent, safe to mutate independently."""
        return copy.deepcopy(self)
def as_dict(self, esri_format=True, precision=None):
extent = {}
for key in EXTENT_KEYS:
val = getattr(self, key)
extent[key] = val if precision is None else round(val, precision)
if self.spatial_reference:
srs_key = "spatialReference" if esri_format else "spatial_reference"
extent[srs_key] = self.spatial_reference.as_dict(esri_format)
return extent
def as_list(self, precision=None):
extent = (getattr(self, key) for key in EXTENT_KEYS)
if precision is None:
return list(extent)
else:
return [round(val, precision) for val in extent]
def as_original(self, esri_format=True, precision=None):
if self._original_format == "dict":
return self.as_dict(esri_format, precision)
elif self._original_format == "list":
return self.as_list(precision)
elif precision is not None:
return Extent(self.as_dict(esri_format, precision))
else:
return self
    def as_bbox_string(self, precision=4):
        """Return "xmin,ymin,xmax,ymax" (WMS-style BBOX), rounded to `precision`."""
        return ",".join(str(xy) for xy in self.as_list(precision))
    def as_json_string(self, esri_format=True, precision=4):
        """Return the extent dict serialized as JSON with sorted keys."""
        return json.dumps(self.as_dict(esri_format, precision), sort_keys=True)
def limit_to_global_extent(self):
if not self.spatial_reference.is_web_mercator():
raise ValueError("Extent must be Web Mercator in order to limit global extent")
new_extent = self.clone()
new_extent.xmin = max(GLOBAL_EXTENT_WEB_MERCATOR[0], new_extent.xmin)
new_extent.ymin = max(GLOBAL_EXTENT_WEB_MERCATOR[1], new_extent.ymin)
new_extent.xmax = min(GLOBAL_EXTENT_WEB_MERCATOR[2], new_extent.xmax)
new_extent.ymax = min(GLOBAL_EXTENT_WEB_MERCATOR[3], new_extent.ymax)
return new_extent
def limit_to_global_width(self):
if not self.spatial_reference.is_web_mercator():
raise ValueError("Extent must be Web Mercator in order to limit global width")
new_extent = self.clone()
new_extent.xmin = max(GLOBAL_EXTENT_WEB_MERCATOR[0], new_extent.xmin)
new_extent.xmax = min(GLOBAL_EXTENT_WEB_MERCATOR[2], new_extent.xmax)
return new_extent
def crosses_anti_meridian(self):
    """ True when the extent spills past the global Web Mercator x-bounds
    on either side (i.e. wraps across the antimeridian).

    :raises ValueError: when the spatial reference is not Web Mercator
    """
    if not self.spatial_reference.is_web_mercator():
        raise ValueError("Extent must be Web Mercator in order to test antimeridian")
    bounds = GLOBAL_EXTENT_WEB_MERCATOR
    return self.xmin < bounds[0] or self.xmax > bounds[2]
def has_negative_extent(self):
    """ True when xmin lies west of the global Web Mercator bounds.

    :raises ValueError: when the spatial reference is not Web Mercator
    """
    if not self.spatial_reference.is_web_mercator():
        raise ValueError("Extent must be Web Mercator in order to test for negative extent")
    bounds = GLOBAL_EXTENT_WEB_MERCATOR
    return self.xmin < bounds[0]
def get_negative_extent(self):
    """
    Extents normalized by ArcGIS on the front end may have a negative extent for the area to the left of the
    meridian. This method returns that component as an extent that can be sent to image retrieval routines.
    The negative_extent will have the same height and same width as the original extent, but the xmin and
    xmax will be different.
    Returns None (implicitly) when the extent has no negative component.
    """
    if self.has_negative_extent():
        new_extent = self.clone()
        # Wrap the portion west of the world bounds around to the east edge:
        # GLOBAL[2] - (GLOBAL[0] - xmin) == xmin + (GLOBAL[2] - GLOBAL[0]),
        # i.e. a shift east by exactly one world width.
        new_extent.xmin = GLOBAL_EXTENT_WEB_MERCATOR[2] - (GLOBAL_EXTENT_WEB_MERCATOR[0] - self.xmin)
        # Preserve the original width
        new_extent.xmax = new_extent.xmin + self.get_dimensions()[0]
        return new_extent
def fit_to_dimensions(self, width, height):
    """ Return a copy of this extent expanded (never shrunk) so that its
    aspect ratio matches a `width` x `height` image. """
    expanded = self.clone()
    target_ratio = float(height) / float(width)
    x_span, y_span = expanded.get_dimensions()
    # Guard against a zero-width extent
    current_ratio = 1 if x_span == 0 else y_span / x_span
    if target_ratio > current_ratio:
        # Image is taller than the extent: pad top and bottom equally
        pad = ((target_ratio * x_span) - y_span) / 2.0
        expanded.ymin -= pad
        expanded.ymax += pad
    elif target_ratio < current_ratio:
        # Image is wider than the extent: pad left and right equally
        pad = ((y_span / target_ratio) - x_span) / 2.0
        expanded.xmin -= pad
        expanded.xmax += pad
    return expanded
def fit_image_dimensions_to_extent(self, width, height, target_resolution=None):
    """
    Return image dimensions that fit the extent's aspect ratio.
    If target_resolution is provided, use that for calculating dimensions instead (useful for WMS client)

    :param width: starting image width in pixels
    :param height: starting image height in pixels
    :param target_resolution: optional map-units-per-pixel override
    :return: (width, height) tuple, one side shrunk to match the extent's aspect
    """
    resolution = target_resolution or self.get_image_resolution(width, height)
    img_aspect_ratio = float(height) / float(width)
    x_diff, y_diff = self.get_dimensions()
    # Guard against zero-width extents, consistent with fit_to_dimensions()
    extent_aspect_ratio = 1 if x_diff == 0 else y_diff / x_diff
    if img_aspect_ratio > extent_aspect_ratio:
        # img is taller than extent
        diff_extent_units = ((img_aspect_ratio * x_diff) - y_diff) / 2.0
        offset_pixels = int(round(diff_extent_units / resolution, 0))
        height = height - 2 * offset_pixels
    elif img_aspect_ratio < extent_aspect_ratio:
        # img is wider than extent
        diff_extent_units = ((y_diff / img_aspect_ratio) - x_diff) / 2.0
        offset_pixels = int(round(diff_extent_units / resolution, 0))
        width = width - 2 * offset_pixels
    return width, height
def get_image_resolution(self, width, height):
    """ Return the map-units-per-pixel for rendering this extent at
    `width` x `height` (geometric mean of the two axis resolutions). """
    x_span, y_span = self.get_dimensions()
    extent_area = x_span * y_span
    return sqrt(extent_area / (width * height))
def project_to_web_mercator(self):
    """ Return a copy of this extent projected to Web Mercator (EPSG:3857).

    Only proj4-compatible spatial references can be projected here.
    :raises ValueError: when the spatial reference is not proj4-compatible
    """
    projected = self.clone()
    if self.spatial_reference.is_web_mercator():
        return projected
    if not self.spatial_reference.is_valid_proj4_projection():
        raise ValueError("Spatial reference is not valid for proj4, must use a different service to project")
    coords = self._get_projected_extent("EPSG:3857")
    projected.xmin, projected.ymin, projected.xmax, projected.ymax = coords
    projected.spatial_reference.wkid = 3857
    projected.spatial_reference.srs = "EPSG:3857"
    return projected
def project_to_geographic(self):
    """ Return a copy of this extent projected to geographic WGS84 (EPSG:4326).

    Only proj4-compatible spatial references can be projected here.
    :raises ValueError: when the spatial reference is not proj4-compatible
    """
    projected = self.clone()
    if self.spatial_reference.is_geographic():
        return projected
    if not self.spatial_reference.is_valid_proj4_projection():
        raise ValueError("Spatial reference is not valid for proj4, must use a different service to project")
    coords = self._get_projected_extent("EPSG:4326")
    projected.xmin, projected.ymin, projected.xmax, projected.ymax = coords
    projected.spatial_reference.wkid = 4326
    projected.spatial_reference.srs = "EPSG:4326"
    return projected
def _correct_for_projection(self):
    """ Normalize self.spatial_reference.srs and clamp latitudes before projecting.

    Mutates self in place. Order matters: srs is resolved from the wkid
    first, and the latitude clamp below tests the resolved srs value.
    """
    if self.spatial_reference.is_web_mercator():
        self.spatial_reference.srs = "EPSG:3857"
    elif self.spatial_reference.wkid:
        self.spatial_reference.srs = f"EPSG:{self.spatial_reference.wkid}"
    # Apply y-axis corrections to avoid singularities in projection at latitude -90 or 90
    if self.spatial_reference.srs == "EPSG:4326":
        # Corrections applied as per open layers convention
        if self.ymin <= -90:
            self.ymin = -85.0511
        if self.ymax >= 90:
            self.ymax = 85.0511
def _get_projected_extent(self, target_srs):
    """
    Densifies the edges with edge_points points between corners, and projects all of them.
    Geographic latitudes must first be bounded to the following or calculations will fail!
        -85.0511 <= y <= 85.0511
    :param target_srs: target projection, e.g. "EPSG:3857"
    :return: the outer bounds of the projected coordinates as
        (xmin, ymin, xmax, ymax).
    :raises BadSpatialReference: when either Proj cannot be constructed
    :raises ValueError: when any projected coordinate is not a number
    """
    self._correct_for_projection()
    source_srs = self.spatial_reference.srs
    from_epsg = source_srs.strip().upper().startswith("EPSG:")
    try:
        # NOTE(review): Proj(init=...) is deprecated in pyproj 2+; assumes a
        # pyproj 1.x-compatible API -- confirm the pinned pyproj version
        source_proj = Proj(init=source_srs) if from_epsg else Proj(str(source_srs))
        target_proj = Proj(init=target_srs) if ":" in target_srs else Proj(str(target_srs))
    except CRSError:
        # NOTE(review): this message blames source_srs even when the
        # target Proj construction is what raised -- confirm intent
        raise BadSpatialReference(f"Invalid SRS value: {source_srs}")
    # Sample a 9x9 grid of points over the extent so curved edges are captured
    edge_points = 9
    samples = list(range(0, edge_points))
    x_diff, y_diff = self.get_dimensions()
    xstep = x_diff / (edge_points - 1)
    ystep = y_diff / (edge_points - 1)
    x_values, y_values = [], []
    for i, j in product(samples, samples):
        x_values.append(self.xmin + xstep * i)
        y_values.append(self.ymin + ystep * j)
    # TODO: check for bidirectional consistency, as is done in ncserve BoundingBox.project() method
    x_values, y_values = transform(source_proj, target_proj, x_values, y_values)
    # Outer bounds of all projected sample points
    projected_values = min(x_values), min(y_values), max(x_values), max(y_values)
    if any(not is_number(coord) for coord in projected_values):
        raise ValueError(f'Invalid projection coordinates for "{source_srs}": {projected_values}')
    return projected_values
def get_scale_string(self, image_width):
"""
This is modified to use the extent's southern latitude to mimic how ArcGIS displays the front end scale.
:return: string representation of the current | |
atom and residues for common group
Adenin_Common_combine_Lig_Res_H_distance.setdefault('%s'%atmH,[]).append(distanceH)#creating dictionary with all lig atom and distance for table
Adenin_Common_combine_Lig_Res_H_distance_uniquify={k:list(set(j)) for k,j in Adenin_Common_combine_Lig_Res_H_distance.items()}
Adenin_CommonH_Lig_Resdict_distance['%s'%id]=Adenin_Common_combine_Lig_Res_H_distance_uniquify#final dic for table with pdb id , lig atom and distance for all group
else:
if line.startswith(('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')):
#print "NHB", line
lineNH=line.split()
lignameNH=lineNH[9]
atmNH=lineNH[8]
resNH=lineNH[3]
residuenumNH=lineNH[4]
distanceNH=lineNH[12]
resnumNH=resNH+residuenumNH
# #appending each residue and its position to list called lresidue
lresidueNH.append(resnumNH)
#appending each ligand atom to list called latom
latomNH.append(atmNH)
#appending distance of each interaction to ldistance
ldistanceNH.append(distanceNH)
#creating a set for residue with position
residue_seenNH.add(resnumNH)
#creating a set for each ligand atom
atom_seenNH.add(atmNH)
#making a dictionary with list comtaining residue name and position
residueNH.setdefault('%s'%id,[]).append(resnumNH)
#making a dictionary with list comataing ligand atoms
atmnameNH.setdefault('%s'%id,[]).append(atmNH)
if atmNH in METHI:
METHI_lresidueNH.append(resnumNH)
METHI_latomNH.append(atmNH)
METHI_graphdicNH.setdefault('%s'%atmNH,[]).append(resnumNH)
METHI_All_combine_Lig_Res_NH.setdefault('%s'%atmNH,[]).append(resnumNH)#creating dictionary with all lig atom and residues for table
METHI_All_combine_Lig_Res_NH_uniquify= {k:list(set(j)) for k,j in METHI_All_combine_Lig_Res_NH.items()}
METHI_allNH_Lig_Resdict['%s'%id]=METHI_All_combine_Lig_Res_NH_uniquify#final dic for table with pdb id , lig atom and residues for all group
METHI_All_combine_Lig_Res_NH_distance.setdefault('%s'%atmNH,[]).append(distanceNH)#creating dictionary with all lig atom and distance for table
METHI_All_combine_Lig_Res_NH_distance_uniquify= {k:list(set(j)) for k,j in METHI_All_combine_Lig_Res_NH_distance.items()}
METHI_allNH_Lig_Resdict_distance['%s'%id]=METHI_All_combine_Lig_Res_NH_distance_uniquify#final dic for table with pdb id , lig atom and distance for all group
if atmNH in NONHcommon_intersectionfinal:
METHI_common_graphdicNH.setdefault('%s'%atmNH,[]).append(resnumNH)
METHI_Common_combine_Lig_Res_NH.setdefault('%s'%atmNH,[]).append(resnumNH)#creating dictionary with common lig atom and residues for table
METHI_Common_combine_Lig_Res_NH_uniquify= {k:list(set(j)) for k,j in METHI_Common_combine_Lig_Res_NH.items()}
METHI_CommonNH_Lig_Resdict['%s'%id]=METHI_Common_combine_Lig_Res_NH_uniquify#final dic for table with pdb id , lig atom and residues for common group
METHI_Common_combine_Lig_Res_NH_distance.setdefault('%s'%atmNH,[]).append(distanceNH)#creating dictionary with all lig atom and distance for table
METHI_Common_combine_Lig_Res_NH_distance_uniquify={k:list(set(j)) for k,j in METHI_Common_combine_Lig_Res_NH_distance.items()}
METHI_CommonNH_Lig_Resdict_distance['%s'%id]=METHI_Common_combine_Lig_Res_NH_distance_uniquify#final dic for table with pdb id , lig atom and distance for all group
if atmNH in Ribose:
Ribose_graphdicNH.setdefault('%s'%atmNH,[]).append(resnumNH)
Ribose_All_combine_Lig_Res_NH.setdefault('%s'%atmNH,[]).append(resnumNH)#creating dictionary with all lig atom and residues for table
Ribose_All_combine_Lig_Res_NH_uniquify= {k:list(set(j)) for k,j in Ribose_All_combine_Lig_Res_NH.items()}
Ribose_allNH_Lig_Resdict['%s'%id]=Ribose_All_combine_Lig_Res_NH_uniquify#final dic for table with pdb id , lig atom and residues for all group
Ribose_All_combine_Lig_Res_NH_distance.setdefault('%s'%atmNH,[]).append(distanceNH)#creating dictionary with all lig atom and distance for table
Ribose_All_combine_Lig_Res_NH_distance_uniquify= {k:list(set(j)) for k,j in Ribose_All_combine_Lig_Res_NH_distance.items()}
Ribose_allNH_Lig_Resdict_distance['%s'%id]=Ribose_All_combine_Lig_Res_NH_distance_uniquify#final dic for table with pdb id , lig atom and distance for all group
if atmNH in NONHcommon_intersectionfinal:
Ribose_common_graphdicNH.setdefault('%s'%atmNH,[]).append(resnumNH)
Ribose_Common_combine_Lig_Res_NH.setdefault('%s'%atmNH,[]).append(resnumNH)#creating dictionary with common lig atom and residues for table
Ribose_Common_combine_Lig_Res_NH_uniquify= {k:list(set(j)) for k,j in Ribose_Common_combine_Lig_Res_NH.items()}
Ribose_CommonNH_Lig_Resdict['%s'%id]=Ribose_Common_combine_Lig_Res_NH_uniquify#final dic for table with pdb id , lig atom and residues for common group
Ribose_Common_combine_Lig_Res_NH_distance.setdefault('%s'%atmNH,[]).append(distanceNH)#creating dictionary with all lig atom and distance for table
Ribose_Common_combine_Lig_Res_NH_distance_uniquify={k:list(set(j)) for k,j in Ribose_Common_combine_Lig_Res_NH_distance.items()}
Ribose_CommonNH_Lig_Resdict_distance['%s'%id]=Ribose_Common_combine_Lig_Res_NH_distance_uniquify#final dic for table with pdb id , lig atom and distance for all group
if atmNH in Adenin:
Adenin_lresidueNH.append(resnumNH)
Adenin_latomNH.append(atmNH)
Adenin_graphdicNH.setdefault('%s'%atmNH,[]).append(resnumNH)
Adenin_All_combine_Lig_Res_NH.setdefault('%s'%atmNH,[]).append(resnumNH)#creating dictionary with all lig atom and residues for table
Adenin_All_combine_Lig_Res_NH_uniquify= {k:list(set(j)) for k,j in Adenin_All_combine_Lig_Res_NH.items()}
Adenin_allNH_Lig_Resdict['%s'%id]=Adenin_All_combine_Lig_Res_NH_uniquify#final dic for table with pdb id , lig atom and residues for all group
Adenin_All_combine_Lig_Res_NH_distance.setdefault('%s'%atmNH,[]).append(distanceNH)#creating dictionary with all lig atom and distance for table
Adenin_All_combine_Lig_Res_NH_distance_uniquify= {k:list(set(j)) for k,j in Adenin_All_combine_Lig_Res_NH_distance.items()}
Adenin_allNH_Lig_Resdict_distance['%s'%id]=Adenin_All_combine_Lig_Res_NH_distance_uniquify#final dic for table with pdb id , lig atom and distance for all group
if atmNH in NONHcommon_intersectionfinal:
Adenin_common_graphdicNH.setdefault('%s'%atmNH,[]).append(resnumNH)
Adenin_Common_combine_Lig_Res_NH.setdefault('%s'%atmNH,[]).append(resnumNH)#creating dictionary with common lig atom and residues for table
Adenin_Common_combine_Lig_Res_NH_uniquify= {k:list(set(j)) for k,j in Adenin_Common_combine_Lig_Res_NH.items()}
Adenin_CommonNH_Lig_Resdict['%s'%id]=Adenin_Common_combine_Lig_Res_NH_uniquify#final dic for table with pdb id , lig atom and residues for common group
Adenin_Common_combine_Lig_Res_NH_distance.setdefault('%s'%atmNH,[]).append(distanceNH)#creating dictionary with all lig atom and distance for table
Adenin_Common_combine_Lig_Res_NH_distance_uniquify={k:list(set(j)) for k,j in Adenin_Common_combine_Lig_Res_NH_distance.items()}
Adenin_CommonNH_Lig_Resdict_distance['%s'%id]=Adenin_Common_combine_Lig_Res_NH_distance_uniquify#final dic for table with pdb id , lig atom and distance for all group
METHI_listdata_H=[]
METHI_listdata_NH=[]
METHI_lresidueH=[]
METHI_latomH=[]
METHI_lresidueNH=[]
METHI_latomNH=[]
METHI_All_combine_Lig_Res_H={}
METHI_Common_combine_Lig_Res_H={}
METHI_All_combine_Lig_Res_NH={}
METHI_Common_combine_Lig_Res_NH={}
METHI_All_combine_Lig_Res_H_distance={}
METHI_All_combine_Lig_Res_NH_distance={}
METHI_Common_combine_Lig_Res_H_distance={}
METHI_Common_combine_Lig_Res_NH_distance={}
Ribose_listdata_H=[]
Ribose_listdata_NH=[]
Ribose_All_combine_Lig_Res_H={}
Ribose_Common_combine_Lig_Res_H={}
Ribose_All_combine_Lig_Res_NH={}
Ribose_Common_combine_Lig_Res_NH={}
Ribose_All_combine_Lig_Res_H_distance={}
Ribose_All_combine_Lig_Res_NH_distance={}
Ribose_Common_combine_Lig_Res_H_distance={}
Ribose_Common_combine_Lig_Res_NH_distance={}
Adenin_lresidueH=[]
Adenin_latomH=[]
Adenin_lresidueNH=[]
Adenin_latomNH=[]
Adenin_listdata_H=[]
Adenin_listdata_NH=[]
Adenin_All_combine_Lig_Res_H={}
Adenin_Common_combine_Lig_Res_H={}
Adenin_All_combine_Lig_Res_NH={}
Adenin_Common_combine_Lig_Res_NH={}
Adenin_All_combine_Lig_Res_H_distance={}
Adenin_All_combine_Lig_Res_NH_distance={}
Adenin_Common_combine_Lig_Res_H_distance={}
Adenin_Common_combine_Lig_Res_NH_distance={}
METHI_common_listdata_H=[]
METHI_common_listdata_NH=[]
Ribose_common_listdata_H=[]
Ribose_common_listdata_NH=[]
Adenin_common_listdata_H=[]
Adenin_common_listdata_NH=[]
lresidueH=[]
latomH=[]
ldistanceH=[]
lresidueNH=[]
latomNH=[]
ldistanceNH=[]
combines_listdata=[]
residue_seenH.clear()
METHI_finalsetH.clear()
METHI_finalsetNH.clear()
Adenin_finalsetH.clear()
Adenin_finalsetNH.clear()
finalsetH.clear()
finalsetNH.clear()
####################Define function for Statistics ################################
def percentage(dictname,subgroup):
Count_Atom={}
percentage_Atom={}
atmlist=[]
if bool(dictname):
for key, value in dictname.iteritems():
for atom in subgroup:
for key1,value1 in value.iteritems():
#for i in dict1.keys():
if atom == key1:
Count_Atom[key1]=1
percentage_Atom['%s'%key]=Count_Atom
#print percent
Count_Atom={}
tabl=pd.DataFrame.from_dict(percentage_Atom).fillna(0)
Num_cols = len (PDBID_LIST)
for atms in percentage_Atom.values():
for atms_key in atms.keys():
atmlist.append(atms_key)
count_atmlist=list(set(atmlist))
tabl['Percentage of Interaction']= (tabl.sum(axis=1)/Num_cols)*100
tabl['Percentage of Interaction']=tabl['Percentage of Interaction'].round(2)
print "<br/>"," No. of Ligand atoms:", len(count_atmlist), "/",len(subgroup), "<br/>"
print tabl.T.to_html(justify='center'),"<br/>"
#print tabl.style.background_gradient(cmap='summer')
#sns.heatmap(tabl['Percentage of Interaction'], annot=True)
Highest_value= tabl['Percentage of Interaction'][tabl['Percentage of Interaction']==tabl['Percentage of Interaction'].max()]
Highest_value=Highest_value.to_dict()
print "Highest percenrage of Interactions identified","<br/>"
Max_tabl=pd.DataFrame(Highest_value.items())
Max_tabl.columns = ['Ligand Atom', 'Percentage']
Max_tabl.rename(index={0: 'Highest'})
#Max_tabl=pd.Series(Highest_value).to_frame()
#Max_tabl.index.rename = 'index'
#Max_tabl.rename(index={0:'zero'}, inplace=True)
#df1.rename(index={0: 'a'})
print Max_tabl.T.to_html(justify='center')
else:
print "No Interactions Observed"
######End of Percentage section###
####Start of Distance section##
#####Start of Distance section###
def distance_calc(dictnames):
    """Print (CGI-style HTML) the mean interaction distance per ligand atom,
    per PDB id, plus count/mean/std summary statistics.

    dictnames -- dict of {pdb_id: {ligand_atom: [distance strings]}}
    Prints HTML to stdout; returns None.
    """
    DistMean_dict={}
    DistFinal_pdb={}
    if bool(dictnames):
        for key,value in dictnames.iteritems():
            for key1,value1 in value.iteritems():
                # distances arrive as strings; convert before averaging
                results = map(float, value1)
                #print value1, np.mean(results)
                mean1=round(np.float64(np.mean(results)), 2)
                DistMean_dict[key1]=mean1
            DistFinal_pdb[key]=DistMean_dict
            # reset the per-PDB accumulator for the next id
            DistMean_dict={}
        Distance_tabl=pd.DataFrame.from_dict(DistFinal_pdb)
        print Distance_tabl.T.to_html(justify='center'),"<br/>"
        print Distance_tabl.apply(pd.Series.describe, axis=1)[['count','mean','std']].dropna().round(2).T.to_html(justify='center'),"<br/>"
        #Distance_tabl['Standard Deviation']=Distance_tabl.std(axis=1)
        #Distance_tabl['Standard Deviation']=Distance_tabl['Standard Deviation'].round(2)
    else:
        print "No Interactions Observed","<br/>"
#End of distance section##
####################End of Define function for Statistics ################################
# Three-letter to one-letter codes for the 20 standard amino acids
aminoacid_code={'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',
'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W',
'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M'}
### List of filenames for csv download ##########
# A per-request UUID suffix keeps concurrent users' temp files from colliding
CSVrandom_name= str(uuid.uuid4())
Adenin_allH='tmp/'+'Adenin_allH' +CSVrandom_name+'.csv'
Adenin_allNH='tmp/'+'Adenin_allNH' +CSVrandom_name+'.csv'
Adenin_CommonH='tmp/'+'Adenin_CommonH' +CSVrandom_name+'.csv'
Adenin_CommonNH='tmp/'+'Adenin_CommonNH' +CSVrandom_name+'.csv'
Ribose_allH='tmp/'+'Ribose_allH' +CSVrandom_name+'.csv'
Ribose_allNH='tmp/'+'Ribose_allNH' +CSVrandom_name+'.csv'
Ribose_CommonH='tmp/'+'Ribose_CommonH' +CSVrandom_name+'.csv'
Ribose_CommonNH='tmp/'+'Ribose_CommonNH' +CSVrandom_name+'.csv'
METHI_allH='tmp/'+'METHI_allH' +CSVrandom_name+'.csv'
METHI_allNH='tmp/'+'METHI_allNH' +CSVrandom_name+'.csv'
METHI_CommonH='tmp/'+'METHI_CommonH' +CSVrandom_name+'.csv'
METHI_CommonNH='tmp/'+'METHI_CommonNH' +CSVrandom_name+'.csv'
#### dict to csv ###
# Dump each accumulated {pdb_id: {atom: residues}} dict to a CSV so the
# download links printed below can serve it
Adenin_allH_df=pd.DataFrame(Adenin_allH_Lig_Resdict)
Adenin_allH_df.to_csv(Adenin_allH)
Adenin_allNH_df=pd.DataFrame(Adenin_allNH_Lig_Resdict)
Adenin_allNH_df.to_csv(Adenin_allNH)
Adenin_CommonH_df=pd.DataFrame(Adenin_CommonH_Lig_Resdict)
Adenin_CommonH_df.to_csv(Adenin_CommonH)
Adenin_CommonNH_df=pd.DataFrame(Adenin_CommonNH_Lig_Resdict)
Adenin_CommonNH_df.to_csv(Adenin_CommonNH)
Ribose_allH_df=pd.DataFrame(Ribose_allH_Lig_Resdict)
Ribose_allH_df.to_csv(Ribose_allH)
Ribose_allNH_df=pd.DataFrame(Ribose_allNH_Lig_Resdict)
Ribose_allNH_df.to_csv(Ribose_allNH)
Ribose_CommonH_df=pd.DataFrame(Ribose_CommonH_Lig_Resdict)
Ribose_CommonH_df.to_csv(Ribose_CommonH)
Ribose_CommonNH_df=pd.DataFrame(Ribose_CommonNH_Lig_Resdict)
Ribose_CommonNH_df.to_csv(Ribose_CommonNH)
METHI_allH_df=pd.DataFrame(METHI_allH_Lig_Resdict)
METHI_allH_df.to_csv(METHI_allH)
METHI_allNH_df=pd.DataFrame(METHI_allNH_Lig_Resdict)
METHI_allNH_df.to_csv(METHI_allNH)
METHI_CommonH_df=pd.DataFrame(METHI_CommonH_Lig_Resdict)
METHI_CommonH_df.to_csv(METHI_CommonH)
METHI_CommonNH_df=pd.DataFrame(METHI_CommonNH_Lig_Resdict)
METHI_CommonNH_df.to_csv(METHI_CommonNH)
#############END of filenames for csv download ############
###Link to download file
#print '<p style=text-align:center >Download: <a href=%s download>Interaction Data</a>'% SubstructureExcel
# NOTE(review): indentation appears to have been lost in this section of the
# file as received; block structure below should be re-verified against the
# deployed script before editing logic.
print "<p align='center'>################################################################","</p>"
print "<p style='font-size:20px; color:blue' align='center'>Adenin sub group structure","</p>"
print '<p style=text-align:center >Download: <a href=%s download>All bonded, </a>' % Adenin_allH
print ' <a href=%s download>All non-bonded, </a>' % Adenin_allNH
print ' <a href=%s download>Common bonded, </a>' % Adenin_CommonH
print ' <a href=%s download>Common non-bonded</a>' % Adenin_CommonNH,"</p>"
print "<p align='center'>################################################################" ,"</p>"
print "<button class='collapsible'>I. All bonded interactions - Click to read basic statistical information</button>"#Start of click drop down
print "<div class='contentsection'>"
print "<p style='font-size:20px; color:black' align='center'>"
print " Number of Ligand atoms:", len(Adenin), "<br/>"
print " Number of PDB IDs:", len(Adenin_allNH_Lig_Resdict.keys()), "<br/>"
print "<div class='row'>"# spliting into two columns
print "<div class='column'>"# spliting into two columns
# Left column: bonded (H) interaction statistics
if bool(Adenin_allH_Lig_Resdict):
print "Statistics of Bonded Intercations"
print percentage(Adenin_allH_Lig_Resdict,Adenin)
if bool(Adenin_allH_Lig_Resdict_distance):
print distance_calc(Adenin_allH_Lig_Resdict_distance)
print "</div>"# closing of first columns
print "<div class='column'>"
# Right column: non-bonded (NH) interaction statistics
if bool(Adenin_allNH_Lig_Resdict):
print "Statistics of Non-Bonded Intercations", "<br/>"
print percentage(Adenin_allNH_Lig_Resdict,Adenin)
if bool(Adenin_allNH_Lig_Resdict_distance):
print distance_calc(Adenin_allNH_Lig_Resdict_distance)
print "</div>"# closing of second columns
print "</div>"#closing of row
print "</div>"#End of click drop down
print "<br/>"
print """
<div class="grid">
<div class="col-2-3">
<div class="module">
"""#Initialization of Adenin grid section
if bool(Adenin_allH_Lig_Resdict):
print "<p style='font-size:20px; color:brown'>List of residues: hydrogen bonds contacts" ,"</p>"
df_Adenin_allH_Lig_Resdict=pd.DataFrame.from_dict(Adenin_allH_Lig_Resdict).fillna('NIL')
print (df_Adenin_allH_Lig_Resdict.to_html(justify='center'))
#print pd.DataFrame.from_dict(Adenin_allH_Lig_Resdict).to_html(justify='center')#for all ligand atoms - hydrogen bonded
else:
print "<p style='font-size:20px; color:brown'>List of residues: hydrogen bonds contacts" ,"</p>"
print "No Interactions"
####################All Residues Colored Table for Adenin: H bonded################################
H_templist4graph=[]
H_graphdic1={}
if bool(Adenin_graphdicH):
# Collapse each atom's residue list to a sorted, de-duplicated string
for k,v in Adenin_graphdicH.iteritems():
#print k
for value in v:
H_templist4graph.append(value)
samp=sorted(list(set(H_templist4graph)))
H_graphdic1.setdefault('%s'%k,[]).append(', '.join(samp))
H_templist4graph=[]
# Widest residue list determines the table's colspan
length_listofcompiledresidues=[]
for key,value in H_graphdic1.iteritems():
for i in value:
valu=i.split(', ')
#print valu
#print len(valu)
length_listofcompiledresidues.append(len(valu))
length_ofcell=max(length_listofcompiledresidues)
print "<p style='font-size:20px; color:brown'> Physicochemical property based color-coding of common amino acids: hydrogen bonds contacts ","</p>"
print "<table border='1'>"
print "<tr>"
print "<th col width='60'>Ligand Atoms</th>"
print "<th colspan='%d'>List of residues from analysed protein structures</th>"% length_ofcell
print "</tr>"
# NOTE(review): each data row emits </tr> below but never prints an opening
# <tr> (see the commented-out print "<tr>") -- the generated HTML is likely
# malformed; confirm against the rendered page
for key in sorted(H_graphdic1.iterkeys()):
print "<td align='center'>%s</td>" %key
for g1 in H_graphdic1[key]:
dat1= g1.split(', ')
for H_k3 in dat1:
print "<td align='center'>"
#print k3
if H_k3.startswith(('ALA','ILE','LEU','MET','MSE','VAL')):
print "<b><font color='pink'>%s</font></b>"%H_k3
if H_k3.startswith(('PHE','TRP', 'TYR')):
print " <b><font color='orange'>%s</font></b>"%H_k3
if H_k3.startswith(('LYS','ARG', 'HIS')):
print " <b><font color='red'>%s</font></b>"%H_k3
if H_k3.startswith(('GLU','ASP')):
print " <b><font color='green'>%s</font></b>"%H_k3
if H_k3.startswith(('ASN','GLN','SER','THR')):
print " <b><font color='blue'>%s</font></b>"%H_k3
if H_k3.startswith(('GLY','PRO')):
print " <b><font color='magenta'>%s</font></b>"%H_k3
if H_k3.startswith(('CYS','CME')):
print " <b><font color='yellow'>%s</font></b>"%H_k3
print "</td>"
#print "<tr>"
print "</tr>"
print "</table>"
else:
print "<p style='font-size:20px; color:brown'> Physicochemical property based color-coding of common amino acids: hydrogen bonds contacts ","</p>"
print "No Interactions"
if bool(Adenin_allNH_Lig_Resdict):
print "<p style='font-size:20px; color:brown'>List of residues: non-bonded contacts","</p>"
df_Adenin_allNH_Lig_Resdict=pd.DataFrame.from_dict(Adenin_allNH_Lig_Resdict).fillna('NIL')
print (df_Adenin_allNH_Lig_Resdict.to_html(justify='center'))
#print pd.DataFrame.from_dict(Adenin_allNH_Lig_Resdict).to_html(justify='center')#for all ligand atoms - Non hydrogen bonded
else:
print "<p style='font-size:20px; color:brown'>List of residues: non-bonded contacts","</p>"
print "No Interactions"
####################All | |
''' Count-Min Sketch python implementation
License: MIT
Author: <NAME> (<EMAIL>)
URL: https://github.com/barrust/count-min-sketch
'''
from __future__ import (unicode_literals, absolute_import, print_function,
division)
import os
import math
from numbers import Number
from struct import (pack, unpack, calcsize)
from .. exceptions import (InitializationError, NotSupportedError)
from .. hashes import (default_fnv_1a)
from .. utilities import (is_valid_file)
from .. constants import (INT32_T_MIN, INT32_T_MAX, INT64_T_MIN, INT64_T_MAX)
class CountMinSketch(object):
''' Simple Count-Min Sketch implementation for use in python;
It can read and write the same format as the c version
(https://github.com/barrust/count-min-sketch)
Args:
width (int): The width of the count-min sketch
depth (int): The depth of the count-min sketch
confidence (float): The level of confidence desired
error_rate (float): The desired error rate
filepath (str): Path to file to load
hash_function (function): Hashing strategy function to use \
`hf(key, number)`
Returns:
CountMinSketch: A Count-Min Sketch object
Note:
Initialization order of operations:
1) From file
2) Width and depth
3) Confidence and error rate
Note:
Default query type is `min`
Note:
For width and depth, width may realistically be in the thousands \
while depth is in the single digit to teens '''
__slots__ = [
'__width', '__depth', '__confidence', '__error_rate',
'__elements_added', '__query_method', '_bins', '_hash_function'
]
def __init__(self, width=None, depth=None, confidence=None,
error_rate=None, filepath=None, hash_function=None):
''' default initilization function '''
# default values
self.__width = 0
self.__depth = 0
self.__confidence = 0.0
self.__error_rate = 0.0
self.__elements_added = 0
self.__query_method = self.__min_query
if is_valid_file(filepath):
self.__load(filepath)
elif width is not None and depth is not None:
valid_prms = (isinstance(width, Number) and width > 0 and
isinstance(depth, Number) and depth > 0)
if not valid_prms:
msg = 'CountMinSketch: width and depth must be greater than 0'
raise InitializationError(msg)
self.__width = int(width)
self.__depth = int(depth)
self.__confidence = 1 - (1 / math.pow(2, self.depth))
self.__error_rate = 2 / self.width
self._bins = [0] * (self.width * self.depth)
elif confidence is not None and error_rate is not None:
valid_prms = (isinstance(confidence, Number) and confidence > 0 and
isinstance(error_rate, Number) and error_rate > 0)
if not valid_prms:
msg = 'CountMinSketch: width and depth must be greater than 0'
raise InitializationError(msg)
self.__confidence = confidence
self.__error_rate = error_rate
self.__width = math.ceil(2 / error_rate)
numerator = (-1 * math.log(1 - confidence))
self.__depth = math.ceil(numerator / 0.6931471805599453)
self._bins = [0] * int(self.width * self.depth)
else:
msg = ('Must provide one of the following to initialize the '
'Count-Min Sketch:\n'
' A file to load,\n'
' The width and depth,\n'
' OR confidence and error rate')
raise InitializationError(msg)
if hash_function is None:
self._hash_function = default_fnv_1a
else:
self._hash_function = hash_function
def __str__(self):
''' string representation of the count min sketch '''
msg = ('Count-Min Sketch:\n'
'\tWidth: {0}\n'
'\tDepth: {1}\n'
'\tConfidence: {2}\n'
'\tError Rate: {3}\n'
'\tElements Added: {4}')
return msg.format(self.width, self.depth, self.confidence,
self.error_rate, self.elements_added)
    @property
    def width(self):
        ''' int: The width (bins per hash row) of the count-min sketch
            Note:
                Not settable '''
        return self.__width
    @property
    def depth(self):
        ''' int: The depth (number of hash rows) of the count-min sketch
            Note:
                Not settable '''
        return self.__depth
    @property
    def confidence(self):
        ''' float: The confidence of the count-min sketch
            (derived as 1 - 1 / 2**depth when built from width/depth)
            Note:
                Not settable '''
        return self.__confidence
    @property
    def error_rate(self):
        ''' float: The error rate of the count-min sketch
            (derived as 2 / width when built from width/depth)
            Note:
                Not settable '''
        return self.__error_rate
    @property
    def elements_added(self):
        ''' int: The total number of insertions (sum of num_els), not the
            number of distinct elements
            Note:
                Not settable '''
        return self.__elements_added
    @property
    def query_type(self):
        ''' str: The name of the query type being used
            Note:
                Valid values:
                    * 'min' or None
                    * 'mean'
                    * 'mean-min' '''
        # Identify the strategy by comparing the selected bound method;
        # 'min' is the fallback / default
        if self.__query_method == self.__mean_query:
            return 'mean'
        elif self.__query_method == self.__mean_min_query:
            return 'mean-min'
        return 'min'
    @query_type.setter
    def query_type(self, val):
        ''' set to min query Options='min', 'mean', 'mean-min'
            other values are set to min
            setting to mean is converting to a Count-Mean Sketch
            setting to mean-min is converting to a Count-Mean-Min Sketch '''
        if val is None:
            self.__query_method = self.__min_query
            return
        val = val.lower()  # case-insensitive matching
        if val == 'mean':
            self.__query_method = self.__mean_query
        elif val == 'mean-min':
            self.__query_method = self.__mean_min_query
        else:
            # unrecognized values silently fall back to 'min'
            self.__query_method = self.__min_query
def clear(self):
''' Reset the count-min sketch to an empty state '''
self.__elements_added = 0
for i, _ in enumerate(self._bins):
self._bins[i] = 0
def hashes(self, key, depth=None):
''' Return the hashes based on the provided key
Args:
key (str): Description of arg1
depth (int): Number of permutations of the hash to generate; \
if None, generate `number_hashes`
Returns:
List(int): A list of the hashes for the key in int form '''
t_depth = self.depth if depth is None else depth
return self._hash_function(key, t_depth)
def add(self, key, num_els=1):
''' Insert the element `key` into the count-min sketch
Args:
key (str): The element to insert
num_els (int): The number of times to insert the element
Returns:
int: The number of times the element was likely inserted \
after the insertion '''
hashes = self.hashes(key)
return self.add_alt(hashes, num_els)
    def add_alt(self, hashes, num_els=1):
        ''' Insert an element by using the hash representation
            Args:
                hashes (list): The hashes representing the element to insert
                num_els (int): The number of times to insert the element
            Returns:
                int: The number of times the element was likely inserted \
                after the insertion '''
        res = list()
        for i, val in enumerate(hashes):
            # row i occupies slice [i*width, (i+1)*width) of the flat bin list
            t_bin = (val % self.width) + (i * self.width)
            self._bins[t_bin] += num_els
            # saturate at int32 max to match the on-disk 'i' (int32) format
            if self._bins[t_bin] > INT32_T_MAX:
                self._bins[t_bin] = INT32_T_MAX
            res.append(self._bins[t_bin])
        self.__elements_added += num_els
        # saturate the total at int64 max to match the on-disk 'q' field
        if self.elements_added > INT64_T_MAX:
            self.__elements_added = INT64_T_MAX
        return self.__query_method(sorted(res))
def remove(self, key, num_els=1):
''' Remove element 'key' from the count-min sketch
Args:
key (str): The element to remove
num_els (int): The number of times to remove the element
Returns:
int: The number of times the element was likely inserted \
after the removal '''
hashes = self.hashes(key)
return self.remove_alt(hashes, num_els)
def remove_alt(self, hashes, num_els=1):
''' Remove an element by using the hash representation
Args:
hashes (list): The hashes representing the element to remove
num_els (int): The number of times to remove the element
Returns:
int: The number of times the element was likely inserted \
after the removal '''
res = list()
for i, val in enumerate(hashes):
t_bin = (val % self.width) + (i * self.width)
self._bins[t_bin] -= num_els
if self._bins[t_bin] < INT32_T_MIN:
self._bins[t_bin] = INT32_T_MIN
res.append(self._bins[t_bin])
self.__elements_added -= num_els
if self.elements_added < INT64_T_MIN:
self.__elements_added = INT64_T_MIN
return self.__query_method(sorted(res))
def check(self, key):
    ''' Check number of times element 'key' is in the count-min sketch
        Args:
            key (str): The key to check the number of times inserted
        Returns:
            int: The number of times the element was likely inserted '''
    # hash once, then delegate to the hash-based lookup path
    return self.check_alt(self.hashes(key))
def check_alt(self, hashes):
    ''' Check the count-min sketch for an element by using the hash \
        representation
        Args:
            hashes (list): The hashes representing the element to check
        Returns:
            int: The number of times the element was likely inserted '''
    # fetch the per-row counters (ascending) and reduce with the query policy
    return self.__query_method(self.__get_values_sorted(hashes))
def export(self, filepath):
    ''' Export the count-min sketch to disk
        Args:
            filepath (str): The path to which the count-min sketch \
            will be written. '''
    with open(filepath, 'wb') as filepointer:
        # write out the bins
        rep = 'i' * len(self._bins)
        filepointer.write(pack(rep, *self._bins))
        # footer: width, depth, elements_added -- must match __load's 'IIq'
        filepointer.write(pack('IIq', self.width, self.depth,
                               self.elements_added))
def __load(self, filepath):
    ''' load the count-min sketch from file '''
    with open(filepath, 'rb') as filepointer:
        # the footer (width, depth, elements_added) sits at the end of the file
        offset = calcsize('IIq')
        filepointer.seek(offset * -1, os.SEEK_END)
        mybytes = unpack('IIq', filepointer.read(offset))
        self.__width = mybytes[0]
        self.__depth = mybytes[1]
        self.__elements_added = mybytes[2]
        # confidence/error are derived from the sketch geometry
        self.__confidence = 1 - (1 / math.pow(2, self.depth))
        self.__error_rate = 2 / self.width
        # the counters are stored as int32 values at the start of the file
        filepointer.seek(0, os.SEEK_SET)
        length = self.width * self.depth
        rep = 'i' * length
        offset = calcsize(rep)
        self._bins = list(unpack(rep, filepointer.read(offset)))
def __get_values_sorted(self, hashes):
    ''' Return the counters addressed by `hashes`, smallest first. '''
    # one counter per depth row; sorted() materialises the list for us
    return sorted(self._bins[(val % self.width) + (row * self.width)]
                  for row, val in enumerate(hashes))
@staticmethod
def __min_query(results):
    ''' generate the min query; assumes sorted list '''
    # ascending-sorted input: the first element is the minimum counter
    return results[0]
def __mean_query(self, results):
    ''' generate the mean query; assumes sorted list '''
    # integer mean over the depth rows (one counter per row)
    return sum(results) // self.depth
def __mean_min_query(self, results):
''' generate the mean-min query; assumes sorted list '''
meanmin = list()
for t_bin in results:
diff = self.elements_added - t_bin
calc = t_bin - diff | |
import io
import psycopg2
from psycopg2 import sql
from psycopg2.extras import RealDictCursor
import sys
import json
import datetime
import decimal
import time
import os
import binascii
from distutils.sysconfig import get_python_lib
import multiprocessing as mp
import datetime
import pandas as pd
import pdb
import inspect
from ..gpss import data_pb2
from ..gpss import data_pb2_grpc
from ..gpss import gpss_pb2
from ..gpss import gpss_pb2_grpc
import grpc
import google.protobuf
import google.protobuf.text_format as tf
from google.protobuf.timestamp_pb2 import Timestamp
from dateutil.parser import parse
class pg_encoder(json.JSONEncoder):
    """
    JSON encoder that degrades gracefully for the python types commonly
    returned by database drivers: dates, times, timedeltas, decimals,
    sets, frozensets and bytes are serialised as their str() form instead
    of raising TypeError.
    """

    # types that are stringified rather than rejected; a single isinstance
    # against a tuple replaces the original chain of eight isinstance calls
    _STR_TYPES = (
        datetime.time,
        datetime.datetime,
        datetime.date,
        decimal.Decimal,
        datetime.timedelta,
        set,
        frozenset,
        bytes,
    )

    def default(self, obj):
        """Return str(obj) for known non-JSON types, else defer to the base class."""
        if isinstance(obj, self._STR_TYPES):
            return str(obj)
        return json.JSONEncoder.default(self, obj)
class pgsql_source(object):
def __init__(self):
"""
Class constructor, the method sets the class variables and configure the
operating parameters from the args provided t the class.
"""
self.schema_tables = {}
self.schema_mappings = {}
self.schema_loading = {}
self.schema_list = []
self.schema_only = {}
def __del__(self):
    """
    Class destructor, tries to disconnect the postgresql connection.
    """
    # intentionally a no-op: connections are closed explicitly elsewhere
    pass
def __set_copy_max_memory(self):
"""
The method sets the class variable self.copy_max_memory using the value stored in the
source setting.
"""
copy_max_memory = str(self.source_config["copy_max_memory"])[:-1]
copy_scale = str(self.source_config["copy_max_memory"])[-1]
try:
int(copy_scale)
copy_max_memory = self.source_config["copy_max_memory"]
except:
if copy_scale =='k':
copy_max_memory = str(int(copy_max_memory)*1024)
elif copy_scale =='M':
copy_max_memory = str(int(copy_max_memory)*1024*1024)
elif copy_scale =='G':
copy_max_memory = str(int(copy_max_memory)*1024*1024*1024)
else:
print("**FATAL - invalid suffix in parameter copy_max_memory (accepted values are (k)ilobytes, (M)egabytes, (G)igabytes.")
sys.exit(3)
self.copy_max_memory = copy_max_memory
def __init_sync(self):
    """
    The method calls the common steps required to initialise the database connections and
    class attributes within sync_tables,refresh_schema and init_replica.
    """
    self.source_config = self.sources[self.source]
    self.out_dir = self.source_config["out_dir"]
    self.copy_mode = self.source_config["copy_mode"]
    self.pg_engine.lock_timeout = self.source_config["lock_timeout"]
    self.pg_engine.grant_select_to = self.source_config["grant_select_to"]
    self.source_conn = self.source_config["db_conn"]
    self.__set_copy_max_memory()
    # autocommit dict-cursor connection to the origin database
    db_object = self.__connect_db( auto_commit=True, dict_cursor=True)
    self.pgsql_conn = db_object["connection"]
    self.pgsql_cursor = db_object["cursor"]
    # destination side: engine connection plus the configured schema mappings
    self.pg_engine.connect_db()
    self.schema_mappings = self.pg_engine.get_schema_mappings()
    self.pg_engine.schema_tables = self.schema_tables
def __connect_db(self, auto_commit=True, dict_cursor=False):
    """
    Connects to PostgreSQL using the parameters stored in self.source_conn.

    NOTE(review): the original docstring referenced self.dest_conn/self.pg_conn
    but the code reads self.source_conn -- confirm which attribute is canonical.
    This method's connection and cursors are widely used in the procedure except
    for the replay process which uses a dedicated connection and cursor.
    :param auto_commit: whether to enable autocommit on the new session
    :param dict_cursor: when True build a RealDictCursor instead of a tuple cursor
    :return: a dictionary with the objects connection and cursor
    :rtype: dictionary
    """
    if self.source_conn:
        strconn = "dbname=%(database)s user=%(user)s host=%(host)s password=%(password)s port=%(port)s connect_timeout=%(connect_timeout)s" % self.source_conn
        pgsql_conn = psycopg2.connect(strconn)
        pgsql_conn .set_client_encoding(self.source_conn["charset"])
        if dict_cursor:
            # rows come back as dictionaries keyed by column name
            pgsql_cur = pgsql_conn .cursor(cursor_factory=RealDictCursor)
        else:
            pgsql_cur = pgsql_conn .cursor()
        self.logger.debug("Changing the autocommit flag to %s" % auto_commit)
        pgsql_conn.set_session(autocommit=auto_commit)
    elif not self.source_conn:
        # no connection parameters configured: abort the whole process
        self.logger.error("Undefined database connection string. Exiting now.")
        sys.exit()
    return {'connection': pgsql_conn, 'cursor': pgsql_cur }
def __export_snapshot(self, queue):
    """
    The method exports a database snapshot and stays idle in transaction until a message from the parent
    process tell it to exit.
    The method stores the snapshot id in the queue for the parent's usage.
    :param queue: the queue object used to exchange messages between the parent and the child
    """
    self.logger.debug("exporting database snapshot for source %s" % self.source)
    sql_snap = """
        BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ;
        SELECT pg_export_snapshot();
    """
    # dedicated non-autocommit connection: the snapshot only stays valid
    # while this transaction is open
    db_snap = self.__connect_db(False)
    db_conn = db_snap["connection"]
    db_cursor = db_snap["cursor"]
    db_cursor.execute(sql_snap)
    snapshot_id = db_cursor.fetchone()[0]
    queue.put(snapshot_id)
    # idle-poll until the parent posts a falsy value, keeping the
    # transaction (and therefore the snapshot) alive in the meantime
    continue_loop = True
    while continue_loop:
        continue_loop = queue.get()
        time.sleep(5)
    db_conn.commit()
def __build_table_exceptions(self):
"""
The method builds two dictionaries from the limit_tables and skip tables values set for the source.
The dictionaries are intended to be used in the get_table_list to cleanup the list of tables per schema.
The method manages the particular case of when the class variable self.tables is set.
In that case only the specified tables in self.tables will be synced. Should limit_tables be already
set, then the resulting list is the intersection of self.tables and limit_tables.
"""
self.limit_tables = {}
self.skip_tables = {}
limit_tables = self.source_config["limit_tables"]
skip_tables = self.source_config["skip_tables"]
if self.tables !='*':
tables = [table.strip() for table in self.tables.split(',')]
if limit_tables:
limit_tables = [table for table in tables if table in limit_tables]
else:
limit_tables = tables
self.schema_only = {table.split('.')[0] for table in limit_tables}
if limit_tables:
table_limit = [table.split('.') for table in limit_tables]
for table_list in table_limit:
list_exclude = []
try:
list_exclude = self.limit_tables[table_list[0]]
list_exclude.append(table_list[1])
except KeyError:
list_exclude.append(table_list[1])
self.limit_tables[table_list[0]] = list_exclude
if skip_tables:
table_skip = [table.split('.') for table in skip_tables]
for table_list in table_skip:
list_exclude = []
try:
list_exclude = self.skip_tables[table_list[0]]
list_exclude.append(table_list[1])
except KeyError:
list_exclude.append(table_list[1])
self.skip_tables[table_list[0]] = list_exclude
def __get_table_list(self):
    """
    Pull the list of base tables for each schema in self.schema_list from
    the origin's information_schema, apply the limit/skip filters and store
    the filtered list in self.schema_tables keyed by schema.
    """
    sql_tables="""
        SELECT
            table_name
        FROM
            information_schema.TABLES
        WHERE
            table_type='BASE TABLE'
            AND table_schema=%s
        ;
    """
    for schema in self.schema_list:
        self.pgsql_cursor.execute(sql_tables, (schema, ))
        table_list = [row["table_name"] for row in self.pgsql_cursor.fetchall()]
        # dict.get replaces the original try/except KeyError lookups
        wanted = self.limit_tables.get(schema, [])
        if wanted:
            table_list = [tab for tab in table_list if tab in wanted]
        banned = self.skip_tables.get(schema, [])
        if banned:
            table_list = [tab for tab in table_list if tab not in banned]
        self.schema_tables[schema] = table_list
def __create_destination_schemas(self):
    """
    Create, for every schema to process, the loading schema and the
    destination schema in the destination database, recording the mapping
    in the class dictionary schema_loading.
    The loading schema is named after the destination schema with a leading
    underscore and a _tmp suffix; the destination name is truncated to 59
    characters so the identifier fits PostgreSQL's default 64-char limit.
    """
    for schema in self.schema_list:
        destination_schema = self.schema_mappings[schema]
        loading_schema = "_%s_tmp" % destination_schema[0:59]
        self.schema_loading[schema] = {
            'destination': destination_schema,
            'loading': loading_schema,
        }
        # loading schema first, then the destination schema (same order
        # as the original implementation)
        for new_schema in (loading_schema, destination_schema):
            self.logger.debug("Creating the schema %s." % new_schema)
            self.pg_engine.create_database_schema(new_schema)
def __get_table_metadata(self, table, schema):
    """
    The method builds the table's metadata querying the information_schema.
    The data is returned as a dictionary.
    :param table: The table name
    :param schema: The table's schema
    :return: table's metadata as a cursor dictionary
    :rtype: dictionary
    """
    # %% escapes survive psycopg2's placeholder substitution; the format()
    # calls run server-side inside postgres
    sql_metadata="""
        SELECT
            col.attname as column_name,
            (
                SELECT
                    pg_catalog.pg_get_expr(def.adbin, def.adrelid)
                FROM
                    pg_catalog.pg_attrdef def
                WHERE
                    def.adrelid = col.attrelid
                    AND def.adnum = col.attnum
                    AND col.atthasdef
            ) as column_default,
            col.attnum as ordinal_position,
            CASE
                WHEN typ.typcategory ='E'
                THEN
                    'enum'
                WHEN typ.typcategory='C'
                THEN
                    'composite'
                ELSE
                    pg_catalog.format_type(col.atttypid, col.atttypmod)
            END
            AS type_format,
            (
                SELECT
                    pg_get_serial_sequence(format('%%I.%%I',tabsch.nspname,tab.relname), col.attname) IS NOT NULL
                FROM
                    pg_catalog.pg_class tab
                    INNER JOIN pg_catalog.pg_namespace tabsch
                    ON tab.relnamespace=tabsch.oid
                WHERE
                    tab.oid=col.attrelid
            ) as col_serial,
            typ.typcategory as type_category,
            CASE
                WHEN typ.typcategory='E'
                THEN
                (
                    SELECT
                        string_agg(quote_literal(enumlabel),',')
                    FROM
                        pg_catalog.pg_enum enm
                    WHERE enm.enumtypid=typ.oid
                )
                WHEN typ.typcategory='C'
                THEN
                (
                    SELECT
                        string_agg(
                            format('%%I %%s',
                                attname,
                                pg_catalog.format_type(atttypid, atttypmod)
                            )
                            ,
                            ','
                        )
                    FROM
                        pg_catalog.pg_attribute
                    WHERE
                        attrelid=format(
                            '%%I.%%I',
                            sch.nspname,
                            typ.typname)::regclass
                )
            END AS typ_elements,
            col.attnotnull as not_null
        FROM
            pg_catalog.pg_attribute col
            INNER JOIN pg_catalog.pg_type typ
            ON col.atttypid=typ.oid
            INNER JOIN pg_catalog.pg_namespace sch
            ON typ.typnamespace=sch.oid
        WHERE
            col.attrelid = %s::regclass
            AND NOT col.attisdropped
            AND col.attnum>0
        ORDER BY
            col.attnum
        ;
        ;
    """
    # quoted schema-qualified name, cast to regclass server-side
    tab_regclass = '"%s"."%s"' % (schema, table)
    self.pgsql_cursor.execute(sql_metadata, (tab_regclass, ))
    table_metadata=self.pgsql_cursor.fetchall()
    return table_metadata
def __create_destination_tables(self):
    """
    Create, in the loading schema, every table listed in the class
    dictionary schema_tables, using the metadata read from the origin.
    """
    for schema, table_list in self.schema_tables.items():
        for table in table_list:
            metadata = self.__get_table_metadata(table, schema)
            self.pg_engine.create_table(metadata, table, schema, 'pgsql')
def __drop_loading_schemas(self):
    """
    Drop from the destination database every loading schema generated by
    __create_destination_schemas (cascade). Assumes schema_loading is set.
    """
    for mapping in self.schema_loading.values():
        loading_schema = mapping["loading"]
        self.logger.debug("Dropping the schema %s." % loading_schema)
        self.pg_engine.drop_database_schema(loading_schema, True)
def __copy_data(self, schema, table, db_copy):
    """
    Copy one table from the origin database into the loading schema via an
    intermediate dump file written in self.out_dir.
    :param schema: the origin schema name
    :param table: the table name
    :param db_copy: dictionary with the origin's connection and cursor
    """
    sql_snap = """
        BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ;
        SET TRANSACTION SNAPSHOT %s;
    """
    out_file = '%s/%s_%s.csv' % (self.out_dir, schema, table )
    loading_schema = self.schema_loading[schema]["loading"]
    from_table = '"%s"."%s"' % (schema, table)
    to_table = '"%s"."%s"' % (loading_schema, table)
    db_conn = db_copy["connection"]
    db_cursor = db_copy["cursor"]
    # attach to the exported snapshot so every table copy sees the same
    # consistent point in time (only when a snapshot is available)
    if self.snapshot_id:
        db_cursor.execute(sql_snap, (self.snapshot_id, ))
    self.logger.debug("exporting table %s.%s in %s" % (schema , table, out_file))
    copy_file = open(out_file, 'wb')
    db_cursor.copy_to(copy_file, from_table)
    copy_file.close()
    self.logger.debug("loading the file %s in table %s.%s " % (out_file, loading_schema , table, ))
    copy_file = open(out_file, 'rb')
    self.pg_engine.pgsql_cur.copy_from(copy_file, to_table)
    copy_file.close()
    db_conn.commit()
    # best-effort cleanup of the intermediate dump file
    try:
        os.remove(out_file)
    except:
        pass
def __create_indices(self):
    """
    The method loops over the tables, queries the origin's database and creates the same indices
    on the loading schema. Primary keys are also registered with the engine
    via store_table.
    """
    db_copy = self.__connect_db(False)
    db_conn = db_copy["connection"]
    db_cursor = db_copy["cursor"]
    # builds either an ALTER TABLE ... ADD CONSTRAINT (primary keys) or a
    # CREATE INDEX statement with the schema qualifier stripped by
    # regexp_replace, so it lands in the current search_path
    # NOTE(review): '\w' in a non-raw python string relies on '\w' not being
    # a recognised escape -- consider a raw string to silence the warning
    sql_get_idx = """
        SELECT
            CASE
                WHEN con.conname IS NOT NULL
                THEN
                    format('ALTER TABLE %%I ADD CONSTRAINT %%I %%s ;',tab.relname,con.conname,pg_get_constraintdef(con.oid))
                ELSE
                    format('%%s ;',regexp_replace(pg_get_indexdef(idx.oid), '("?\w+"?\.)', ''))
            END AS ddl_text,
            CASE
                WHEN con.conname IS NOT NULL
                THEN
                    format('primary key on %%I',tab.relname)
                ELSE
                    format('index %%I on %%I',idx.relname,tab.relname)
            END AS ddl_msg,
            CASE
                WHEN con.conname IS NOT NULL
                THEN
                    True
                ELSE
                    False
            END AS table_pk
        FROM
            pg_class tab
            INNER JOIN pg_namespace sch
            ON
                sch.oid=tab.relnamespace
            INNER JOIN pg_index ind
            ON
                ind.indrelid=tab.oid
            INNER JOIN pg_class idx
            ON
                ind.indexrelid=idx.oid
            LEFT OUTER JOIN pg_constraint con
            ON
                con.conrelid=tab.oid
                AND idx.oid=con.conindid
        WHERE
            (
                contype='p'
                OR contype IS NULL
            )
            AND tab.relname=%s
            AND sch.nspname=%s
        ;
    """
    for schema in self.schema_tables:
        table_list = self.schema_tables[schema]
        for table in table_list:
            loading_schema = self.schema_loading[schema]["loading"]
            destination_schema = self.schema_loading[schema]["destination"]
            # point search_path at the loading schema so the unqualified
            # DDL built above lands there
            # NOTE(review): SET with a bound parameter produces a quoted
            # literal -- confirm this is accepted for search_path
            self.pg_engine.pgsql_cur.execute('SET search_path=%s;', (loading_schema, ))
            db_cursor.execute(sql_get_idx, (table, schema))
            idx_tab = db_cursor.fetchall()
            for idx in idx_tab:
                self.logger.info('Adding %s', (idx[1]))
                try:
                    self.pg_engine.pgsql_cur.execute(idx[0])
                except:
                    # best-effort: log and keep creating the remaining indices
                    self.logger.error("an error occcurred when executing %s" %(idx[0]))
                if idx[2]:
                    # register the table with its (placeholder) pk column list
                    self.pg_engine.store_table(destination_schema, table, ['foo'], None)
    db_conn.close()
def __copy_tables(self):
    """
    The method copies the data between tables, from the postgres source and the corresponding
    postgresql loading schema. Before the process starts a snapshot is exported in order to get
    a consistent database copy at the time of the snapshot.
    """
    db_copy = self.__connect_db(False)
    check_cursor = db_copy["cursor"]
    db_conn = db_copy["connection"]
    sql_recovery = """
        SELECT pg_is_in_recovery();
    """
    # a standby (in recovery) cannot export a snapshot
    check_cursor.execute(sql_recovery)
    db_in_recovery = check_cursor.fetchone()
    db_conn.commit()
    if not db_in_recovery[0]:
        # export the snapshot from a child process which keeps the
        # transaction open until we post False into the queue below
        queue = mp.Queue()
        snap_exp = mp.Process(target=self.__export_snapshot, args=(queue,), name='snap_export',daemon=True)
        snap_exp.start()
        self.snapshot_id = queue.get()
        # NOTE(review): consistent is False on both branches -- confirm
        # whether the snapshot path was meant to set it True
        self.consistent = False
    else:
        self.snapshot_id = None
        self.consistent = False
    for schema in self.schema_tables:
        table_list = self.schema_tables[schema]
        for table in table_list:
            self.__copy_data(schema, table, db_copy)
    if not db_in_recovery[0]:
        # release the snapshot-holding child process
        queue.put(False)
    db_conn.close()
def init_replica(self):
    """
    The method performs a full init replica for the given source:
    builds the table list, creates loading/destination schemas and tables,
    copies the data, recreates the indices, swaps the schemas in place and
    finally records the source status and high watermark.
    On any failure the loading schemas are dropped, the source is flagged
    in error and the exception is re-raised.
    """
    self.logger.debug("starting init replica for source %s" % self.source)
    self.__init_sync()
    if self.dest_conn["use_gpss"] == True:
        self.connectToGpss()
    self.schema_list = [schema for schema in self.schema_mappings]
    self.__build_table_exceptions()
    self.__get_table_list()
    self.__create_destination_schemas()
    self.pg_engine.schema_loading = self.schema_loading
    self.pg_engine.set_source_status("initialising")
    try:
        self.__create_destination_tables()
        self.__copy_tables()
        self.__create_indices()
        self.pg_engine.grant_select()
        # promote the loading schemas to their final names
        self.pg_engine.swap_schemas()
        self.__drop_loading_schemas()
        self.pg_engine.set_source_status("initialised")
        # no binlog coordinates for a postgres origin: store a null marker
        fake_master = [{'File': None, 'Position': None }]
        self.pg_engine.set_source_highwatermark(fake_master, consistent=self.consistent)
        notifier_message = "init replica for source %s is complete" % self.source
        self.notifier.send_message(notifier_message, 'info')
        self.logger.info(notifier_message)
    except:
        # clean up the partial load, flag the error and propagate
        self.__drop_loading_schemas()
        self.pg_engine.set_source_status("error")
        notifier_message = "init replica for source %s failed" % self.source
        self.notifier.send_message(notifier_message, 'critical')
        self.logger.critical(notifier_message)
        raise
class pg_engine(object):
def __init__(self):
python_lib=get_python_lib()
self.sql_dir = "%s/pg_chameleon/sql/" % python_lib
self.sql_upgrade_dir = "%s/upgrade/" % self.sql_dir
self.table_ddl={}
self.idx_ddl={}
self.type_ddl={}
self.idx_sequence=0
self.type_dictionary = {
'integer':'integer',
'mediumint':'bigint',
'tinyint':'integer',
'smallint':'integer',
'int':'integer',
'bigint':'bigint',
'varchar':'character varying',
'character varying':'character varying',
'text':'text',
'char':'character',
'datetime':'timestamp without time zone',
'date':'date',
'time':'time without time zone',
'timestamp':'timestamp without time zone',
'tinytext':'text',
'mediumtext':'text',
'longtext':'text',
'tinyblob':'bytea',
'mediumblob':'bytea',
'longblob':'bytea',
'blob':'bytea',
'binary':'bytea',
'varbinary':'bytea',
'decimal':'numeric',
'double':'double precision',
'double precision':'double precision',
'float':'double precision',
'bit':'integer',
'year':'integer',
'enum':'enum',
'set':'text',
'json':'json',
'bool':'boolean',
'boolean':'boolean',
'geometry':'bytea',
}
self.dest_conn = None
self.pgsql_conn = None
self.logger = None
self.idx_sequence = 0
self.lock_timeout = 0
self.migrations = [
{'version': '2.0.1', 'script': '200_to_201.sql'},
{'version': '2.0.2', 'script': '201_to_202.sql'},
{'version': '2.0.3', 'script': '202_to_203.sql'}, | |
<filename>dev/WebTrader.py
#-*- coding:UTF-8 -*-
from time import sleep
from HTSocket import HTSocket
import pandas as pd
import tushare as ts
import SocketTrader as st
import ShellTrader as sht
#from numba import jit
import pymongo
#import json
import logging
logging.basicConfig(level=logging.INFO, filename='.pyTrader.log', filemode='w')
logging.captureWarnings(True)
'''
模仿windPy的功能, 包裹HTSocket.py, 或來自第三方其他證券公司web api,
主要提供多賬戶同時使用的便利, 保持與windPy接口一致
'''
class WebTrader(object):
    '''Base class for web brokerage wrappers; keeps the interface aligned with windPy.'''
    def __init__(self):
        ''' doesn't work
        self.api_name = api_name
        self.account = account
        self.password = password
        self.isOn = False
        '''
        #self.logon_object = logon_object
        #mrl = r'mongodb://192.168.1.101:3001/meteor'
        # NOTE(review): `mrl` is a local that is never used afterwards;
        # HTTrader's `mrl=mrl` default appears to need a module-level name --
        # confirm where `mrl` is really supposed to be defined
        mrl = r'mongodb://localhost:3001/meteor'
# 華泰證券
class HTTrader(WebTrader):
"""
我是一個(logon後)已經登錄的華泰證券交易端,可接受買賣查詢等委託,接口與萬得保持一致,以便同時使用.
"""
def __init__(self, bname, account, <PASSWORD>_password, \
    service_password=None, api_name='htweb', limitAmnt=100000, mrl=mrl):
    # NOTE(review): the `<PASSWORD>` tokens are redaction artifacts from the
    # published source -- the real parameter/attribute names must be restored
    # before this module can run
    super(HTTrader, self).__init__()
    self.limitAmnt = limitAmnt # per-order quantity cap
    self.bname = bname
    self.api_name = api_name
    self.account = account
    # unique id for this live session: api name + account number
    self.liveId = self.api_name + self.account
    self.password = <PASSWORD>
    self.encrypted_password = <PASSWORD>
    self.service_password = <PASSWORD>_password
    self.isOn = False                 # logged-in flag
    self.stockAccounts = {}           # exchange_type -> account item
    self.api_class = HTSocket
    # factory so a fresh socket can be built on every (re)login
    self.api_func = lambda : self.api_class(self.account, \
        self.encrypted_password, self.service_password)
    self.api = self.api_func()
    # hard-coded FX rates (USD/RMB and HKD/RMB) -- TODO confirm they are refreshed elsewhere
    self.dormb = 6.4778
    self.hkrmb = 0.8386
    self.rmbdo = 1/self.dormb
    self.rmbhk = 1/self.hkrmb
    '''
    連接Meteor 數據庫
    '''
    # connect to the Meteor MongoDB instance and bind the collections
    self.mrl = mrl
    try:
        self.conn = pymongo.MongoClient(self.mrl)
        self.securities = self.conn.meteor.securities
        self.tradings = self.conn.meteor.tradings
        self.funds = self.conn.meteor.funds
    except Exception as e:
        print('WebTrader init:')
        print(e)
def db_find(self, db, something={}):
    """
    Return the documents of collection `db` matching filter `something`
    as a pandas DataFrame.

    Bug fix: the original ignored `something` and always queried with {}.
    The mutable default is kept for interface compatibility; it is never
    mutated here.
    """
    return pd.DataFrame(list(db.find(something)))
def db_upsert_one(self, db, upfilter, update):
    """
    Upsert a single document in collection `db`: apply `update` to the
    document matching `upfilter`, inserting it when absent.
    Errors are printed and swallowed (best-effort write).
    """
    # return db.update_one(upfilter, update, upsert=True)
    # NOTE(review): Collection.update is deprecated/removed in modern
    # pymongo -- the commented update_one call above is the replacement
    try:
        return db.update(upfilter, update, upsert=True)
    except Exception as e:
        print('WebTrader db_upsert_one:')
        print(e)
def db_insert(self, db, something):
    """
    Insert `something` into collection `db`; errors are printed and
    swallowed (best-effort write).
    """
    try:
        return db.insert(something)
    except Exception as e:
        print('WebTrader db_insert:')
        print(e)
def db_insert_df(self, db, df):
    """
    Insert every row of DataFrame `df` into collection `db`, one document
    per row; errors are printed and swallowed (best-effort write).
    """
    try:
        # transpose so to_dict() yields one record dict per original row
        records = df.T.to_dict()
        for key in records:
            db.insert(records[key])
        return self
    except Exception as e:
        print('WebTrader db_insert_df:')
        print(e)
def recordAccounts(self, items):
    """Cache each account item under its exchange_type key."""
    self.stockAccounts.update({item['exchange_type']: item for item in items})
    #print(self.stockAccounts)
def money(self, code):
    '''
    Return (TotalAsset, AvailableFund, rmb ratio) for the currency that
    security `code` trades in.
    money_type: '0' RMB; '1' USD (codes starting with 90); '2' HKD (20).
    '''
    if code[:2] == '90':
        mt = '1'
    elif code[:2] == '20':
        mt = '2'
    else:
        mt = '0'
    cptl = self.getCapital()
    # bug fix: DataFrame.ix was deprecated and then removed from pandas;
    # .loc is the supported label-based indexer with identical semantics
    # for these (row label, column label) lookups
    _AvailableFund = cptl.loc[mt, 'AvailableFund']
    _TotalAsset = cptl.loc[mt, 'TotalAsset']
    _ratio = cptl.loc[mt, 'rmb_value'] / cptl.loc[mt, 'rmb_total']
    return _TotalAsset, _AvailableFund, _ratio
def getExCount(self, code):
    """Map a security code prefix to its (exchange_type, stock_account) pair."""
    prefix = code[:2]
    if prefix in ('15', '16', '00'):
        ex_type = '2'
    elif prefix in ('50', '51', '60'):
        ex_type = '1'
    elif prefix == '90':
        ex_type = 'D'
    elif prefix == '20':
        ex_type = 'H'
    else:
        raise Exception('stock_account not ready for code: '+code)
    return ex_type, self.stockAccounts[ex_type]['stock_account']
def logon(self, t=4):
    """
    Log in, retrying recursively up to `t` times; returns self on success
    and raises when the attempts are exhausted.
    """
    if self.isOn:
        print('{bname} {account} : 登錄成功'.format(bname= self.bname, account= self.account))
        return self
    elif t < 0:
        raise Exception('{bname} {account} : 登錄失敗'.format(bname= self.bname, account= self.account))
    else:
        '''
        登錄使用新的 HTSocket object
        '''
        # each attempt uses a freshly built HTSocket object
        self.api = self.api_func()
        #sleep(3)
        print('嘗試登錄,剩餘次數: {n}'.format(n=t-1))
        '''
        以下一行如果登錄成功,會設置 self.isOn = True
        '''
        # autolog() sets self.isOn = True on success, making the
        # recursive call below terminate
        self.autolog()
        #self.manulog()
        return self.logon(t-1)
def reLogon(self):
    """Force a fresh login: drop the logged-in flag, wait, then retry up to 8 times."""
    self.isOn = False
    sleep(3)
    return self.logon(t=8)
def postLogon(self):
    """Record the session state after a successful login."""
    self.isOn = True
    # reach into HTSocket's private __trade_keys via its mangled name --
    # fragile; assumes HTSocket keeps that attribute layout
    ks = self.api._HTSocket__trade_keys
    self.recordAccounts(ks['item'])
    self.logonId = ks['uid']
    print('{"api":"ready"}')
def autolog(self):
    """
    Automatic login loop: keep retrying until the login, the position fetch
    and the balance fetch all succeed, then run postLogon and return self.
    Exceptions are printed and swallowed (caller retries via logon()).
    """
    try:
        while True:
            # bug fix: _get_balance was referenced without calling it, so the
            # bound-method object was always truthy and the balance check was
            # a no-op (compare _getCapital, which calls self.api._get_balance())
            if self.api.try_auto_login() and self.api._get_position() and\
                    self.api._get_balance():
                self.postLogon()
                return self
            sleep(3)
    except Exception as e:
        print('WebTrader autolog')
        print(e)
def manulog(self):
    """
    Manual login path: fetch and display the verify code, read it from
    stdin, then log in and prepare the trade session.
    """
    try:
        self.api.prepare_login()
        self.api.show_verify_code()
        vericode = input("input verify code: ")
        self.api.enter_verify_code(vericode)
        sleep(5)
        if self.api.login():
            self.postLogon()
            self.api.prepare_trade()
        return self
    except Exception as e:
        print('WebTrader manulog')
        print(e)
def _getCapital(self, t=3):
    """
    Fetch the account balance as a DataFrame indexed by money_type,
    retrying up to `t` times (2s apart) and re-logging-in once exhausted.
    """
    if self.api._get_balance():
        return pd.DataFrame.from_dict(self.api.capital).T.set_index('money_type')
    '''
    return pd.DataFrame.from_dict({0: self.api.balance}).T
    '''
    else:
        if t < 0:
            # out of retries: force a fresh login, then start over
            self.reLogon()
            return self._getCapital()
        else:
            sleep(2)
            return self._getCapital(t-1)
def _getPosition(self, t=5):
    '''
    Fetch the holdings as a DataFrame, retrying up to `t` times (2s apart)
    and re-logging-in once the retries are exhausted.
    '''
    p = {}
    if self.api._get_position():
        p = self.api.stock_position
    if p:
        pdata = pd.DataFrame.from_dict(p)
        return pdata
    else:
        if t < 0:
            # out of retries: force a fresh login, then start over
            self.reLogon()
            return self._getPosition()
        else:
            sleep(2)
            return self._getPosition(t-1)
def getCapital(self):
    '''
    Fetch the balance and rename the columns to the Wind-style names;
    also derives RMB-equivalent totals and records the snapshot in Mongo.
    '''
    rndict = {
        'money_type': 'money_type', # '0',
        'money_name': 'money_name', # e.g. '人民币' (RMB)
        'market_value': 'market_value', # 100000.0, market value of holdings
        'fetch_balance': 'fetch_balance', #? '0', withdrawable
        'enable_balance': 'AvailableFund', # 100000.0, available funds
        'asset_balance': 'TotalAsset', # 10000.0, total assets
        'current_balance': 'current_balance' # '0'
    }
    jkc = self._getCapital().rename(columns=rndict)
    jkc.loc[jkc.money_name == '人民币','rmb_value']=jkc.TotalAsset
    '''
    不一定有b股賬戶
    '''
    # the account does not necessarily have B-share (USD/HKD) sub-accounts;
    # convert what exists with the hard-coded FX rates
    try:
        jkc.loc[jkc.money_name == '美元','rmb_value']=jkc.TotalAsset*self.dormb
        jkc.loc[jkc.money_name == '港币','rmb_value']=jkc.TotalAsset*self.hkrmb
    except Exception as e:
        jkc.dropna()
        print('WebTrader getCapital 無外匯而已')
        print(e)
    jkc.loc[:,'rmb_total'] = jkc.rmb_value.sum()
    jkc.loc[:,'acc_id'] = self.liveId
    # best-effort persistence of the snapshot
    try:
        self.db_insert_df(self.funds, jkc)
    except Exception as e:
        print('WebTrader getCapital db_insert_df: ')
        print(e)
    return jkc
def getPosition(self):
    '''
    Fetch the holdings and rename the columns to the Wind-style names;
    also derives the per-position `extra` weight relative to the currency's
    total assets.
    '''
    rndict = {
        'enable_amount': 'SecurityAvail',
        'stock_name': 'SecurityName',
        'last_price': 'LastPrice',
        'income_balance': 'Profit',
        'market_value': 'HoldingValue', # 10253.4, market value
        'keep_cost_price': 'keep_cost_price', # 102.534, break-even price
        'av_buy_price': 'av_buy_price', #?
        'hand_flag': 'hand_flag', # ?
        'current_amount': 'SecurityAmount', #? number of shares held
        'stock_code': 'SecurityCode', # security code
        'cost_price': 'CostPrice', # cost price
        'exchange_type': 'exchange_type',
        'av_income_balance': 'av_income_balance', # ?
        'exchange_name': 'exchange_name', # '上海A',
        'stock_account': 'stock_account' # 'A111111111'
    }
    pdata = self._getPosition()
    pos = pdata.rename(columns=rndict)
    cpt = self.getCapital()
    try:
        '''
        有些沒有B股賬戶,有時賬戶無持倉
        '''
        # some accounts have no B-share sub-account, and the account may
        # hold no positions at all -- hence the broad try/except below
        pos.loc[(pos.exchange_type=='1')|(pos.exchange_type=='2'),'extra']=\
            pos.HoldingValue/cpt.loc['0','TotalAsset']\
            - max(0.005, (1 / (1+pos.loc[(pos.exchange_type=='1')|\
            (pos.exchange_type=='2'),'Profit'].size)))
        pos.loc[pos.exchange_type=='D','extra']=pos.HoldingValue/cpt.loc['1','TotalAsset']\
            - max(0.005, (1 / (1+pos.loc[pos.exchange_type=='D','Profit'].size)))
        pos.loc[pos.exchange_type=='H','extra']=pos.HoldingValue/cpt.loc['2','TotalAsset']\
            - max(0.005, (1 / (1+pos.loc[pos.exchange_type=='H','Profit'].size)))
    except Exception as e:
        pos.dropna()
        print('WebTrader getPosition: 空賬戶而已,並非出錯')
        print(e)
    return pos
# @jit
def briefPosition(self):
    """
    Condensed position view indexed by (SecurityCode, SecurityName), with
    per-item profit percentage and fresh min/max price trackers.
    """
    tmp = self.getPosition()
    pos = tmp[:]
    # pos = pos[pos.SecurityAvail > 100] # leave lots of one board lot or less alone
    # pos = pos[['SecurityCode' , 'SecurityName', 'Profit', 'TradingCost','CostPrice', 'LastPrice']]
    selected = ['SecurityCode' , 'SecurityName', 'Profit', 'SecurityAmount',\
        'LastPrice', 'CostPrice', 'HoldingValue', 'SecurityAvail', 'extra'] # ,'keep_cost_price','CostPrice']
    pos = pos[selected]
    '''
    標的品種不賣完
    '''
    # keep only rows that still hold value (never fully sell the underlying)
    pos = pos[pos.HoldingValue > 0]
    # profit relative to cost basis, in percent
    pos.loc[:,'PerItemProfit'] = (tmp.Profit / (tmp.HoldingValue - tmp.Profit)) * 100
    # seed the running extremes with the current price
    pos.loc[:,'MaxPrice'] = tmp.LastPrice
    pos.loc[:,'MinPrice'] = tmp.LastPrice
    pos['stimes'] = 0 # sell times
    pos['btimes'] = 0 # buy times
    #pos['Fixed'] = pos.SecurityName.str.contains('A') & pos.SecurityCode.str.match(r'^(1|5)')
    #return pos.set_index(['SecurityCode', 'SecurityName', 'Fixed']) # .sort_index()
    return pos.set_index(['SecurityCode', 'SecurityName']) # .sort_index()
def simplePosition(self):
    """Position frame reduced to a few key columns, indexed by SecurityCode."""
    columns = ['SecurityCode', 'Profit', 'LastPrice', 'CostPrice',
               'HoldingValue', 'SecurityAvail', 'extra']
    frame = self.getPosition()[:]
    return frame[columns].set_index('SecurityCode')
def availableSecurity(self, code):
    '''
    Return (total amount, available amount) for `code`, or 0 when the code
    is not held. This should be rewritten to fetch the amounts for every
    security touched by sellAll in a single pass.
    '''
    # NOTE(review): inconsistent return type -- a 2-tuple when held but a
    # bare 0 otherwise; callers unpacking the result will break on 0
    dfa = self.getPosition()
    if code in dfa.SecurityCode.values:
        info = dfa[dfa.SecurityCode == code]
        return info.SecurityAmount.values[0], info.SecurityAvail.values[0]
    else:
        return 0
def getTrade(self, t=3):
    """Today's trades as a DataFrame, retrying up to `t` times before re-login."""
    if self.api._get_today_trade():
        return pd.DataFrame.from_dict(self.api.trade_list)
    # fetch failed: either retry after a pause or force a fresh login
    if t < 0:
        self.reLogon()
        return self.getTrade()
    sleep(2)
    return self.getTrade(t-1)
def _getOrderInfo(self, t=3):
    """Today's entrusts as a DataFrame, retrying up to `t` times before re-login."""
    if self.api._get_today_entrust():
        return pd.DataFrame.from_dict(self.api.entrust_list)
    # fetch failed: either retry after a pause or force a fresh login
    if t < 0:
        self.reLogon()
        return self._getOrderInfo()
    sleep(2)
    return self._getOrderInfo(t-1)
def getOrderInfo(self):
    """Today's entrust (order) list with columns renamed to the Wind-style names."""
    rndict ={
        'entrust_price': 'entrust_price', # 102.533, order price
        'stock_account': 'stock_account', # 'A1111111', shareholder account
        'entrust_time': 'entrust_time', # '110849', order time
        'entrust_amount': 'OrderVolume', # 100.0, ordered quantity
        'stock_name': 'SecurityName', # '银华日利',
        'status_name': 'status_name', # '已成',
        'exchange_type': 'exchange_type', # '1',
        'prop_name': 'prop_name', # '买卖',
        'bs_name': 'bs_name', # '买入',
        'entrust_status': 'entrust_status', # '8', 8=filled, 9=invalid, 6=cancelled, 2=submitted
        'entrust_no': 'entrust_no', # '24410', order number
        'business_price': 'business_price', # 102.533,
        'business_amount': 'TradedVolume', # 100.0,
        'entrust_prop': 'entrust_prop', # '0',
        'stock_code': 'SecurityCode', # '511880', security code
        'entrust_bs': 'entrust_bs', # '1', 1=buy, 2=sell
        'exchange_name': 'exchange_name', # '上海A'
    }
    pdata = self._getOrderInfo()
    return pdata.rename(columns=rndict)
# 當日委託
def briefOrderInfo(self):
    """
    Today's entrust list reduced to six renamed (Chinese-label) columns;
    falls back to the full frame when the expected columns are missing.
    """
    oinfo = self.getOrderInfo()
    _cols_ = ['SecurityCode','SecurityName','business_price',\
        'OrderVolume','TradedVolume','entrust_bs']
    rndict = {
        'SecurityCode': '碼',
        'SecurityName': '名',
        'entrust_bs': '類',
        'business_price': '價',
        'OrderVolume': '申',
        'TradedVolume': '成'
    }
    try:
        return (oinfo[_cols_]).rename(columns=rndict) # ,'CancelVolume']]
    except KeyError:
        # narrowed from a bare except: only a missing column (e.g. an empty
        # entrust frame) is expected here; return the raw frame instead
        return oinfo
def getWOrders(self, t=3):
    """Cancellable orders as a DataFrame, retrying up to `t` times before re-login."""
    if self.api._get_cancel_list():
        return pd.DataFrame.from_dict(self.api.cancel_list)
    # fetch failed: either retry after a pause or force a fresh login
    if t < 0:
        self.reLogon()
        return self.getWOrders()
    sleep(2)
    return self.getWOrders(t-1)
# 限價買入
#限价委托,price: 2.00 amt: 100
# @jit
def buyAt(self, code, price=0, amount=0):
    '''
    Buy code at price and amount, expecting those as args
    > w.torder("150001.SZ","Buy","2.00","100","OrderType=LMT;LogonId=2")
    > buyAt("150153.SZ", "2.001","10000")
    '''
    # hard-coded blacklist for a single code -- TODO confirm why 150022 is excluded
    if code == '150022':
        return 0
    if amount == 0:
        return 0 # return something?
    # orders above the per-order cap are split recursively: the remainder
    # (rounded down to whole board lots) first, then a cap-sized order
    if amount > self.limitAmnt:
        amt = amount - self.limitAmnt
        self.buyAt(code, price=price, amount=amt//100*100)
        return self.buyAt(code, price=price, amount=self.limitAmnt)
    if price == 0: # price 0 would mean "best counterparty price"; branch left unimplemented
        # dfOrder = wdf(w.torder(code, "Buy", "0", amount, "OrderType=ITC;LogonId={0}".format(self.logonId)))
        # NOTE(review): this branch silently does nothing and falls through
        # to the implicit None return -- confirm intended behaviour
        pass
    else:
        typ, accnt = self.getExCount(code)
        info = self.api._buy(typ, accnt, code, amount=amount, price=price)
        return info
    # info = self.getOrderInfo()
    # return info # info[(info['SecurityCode'] == code) & (info['bs_name'] =='买入')][-1:]
def buyIt(self, code, percent, price=0, t_type=None):
'''
Get Amnt and ask price and buy percentage of all
'''
#print("批量買入:" + code)
if percent < 0:
return 0
if price == 0:
# askPrice = quote126(code).ask1[code[:-3]] + 0.002
askPrice = float(ts.get_realtime_quotes(code).ask[0])
else:
askPrice = price
cp_tot, cp_ava,cp_ratio = self.money(code)
rs = 0
try:
# fund = capit.AvailableFund * percent # <-- 剩餘模式
percent = percent / cp_ratio
fund = min(cp_tot * percent, cp_ava) # <-- 等額模式
'''if fund < 500:
print('{code}: 剩餘資金較少,避免零碎小單累計手續費成本,故暫時不買'.format(code= code))
return 0
'''
amnt = fund // (askPrice*100)
autoAmnt = amnt * 100
if autoAmnt < 100:
print('{'+'"info":"{code}: 資金{percent}%剩餘{fund}不夠{askPrice}買({autoAmnt})"'.format\
(code= code, percent=percent*100,fund=fund, askPrice=askPrice, autoAmnt=autoAmnt)+'}')
# print(capit) # 深圳b股經常報資金餘額不足,看有無問題
return 0
if t_type == 'itc': # 如果採用對方限價
rs = self.buyAt(code, price=0, amount=autoAmnt)
else:
rs = self.buyAt(code, price=askPrice, amount=autoAmnt)
# print("批量買入{code} {autoAmnt}股, 報價:{price}".format(code=code, autoAmnt=autoAmnt, price=askPrice))
self.db_insert(self.tradings, {'acc_id':self.liveId, '代碼': code, '報價': askPrice, '比重': percent,'數量': autoAmnt, '操作': '買入'})
return rs #return getOrderInfo(fidBroker)
except | |
<reponame>visdom2000/python-for-android<gh_stars>100-1000
from __future__ import print_function, unicode_literals
import sys
import types
import traceback
# Test imports.
import time
droid = None
skip_gui = False
fOutName = True
# tests for python modification for android {{{1
def test_029_isfile():  # issue #29 {{{1
    """issue #29: os.path.isfile must report files correctly after create/remove.

    Returns True on success, False on any failed check.
    """
    import os
    # FIXME: determine path to sdcard. like: path = os.environ[""]
    path = os.path.dirname(__file__)
    fname = os.path.abspath(os.path.join(path, "test_isfile"))
    # close the handle explicitly (the original leaked it), so the
    # remove below works on platforms with mandatory locking
    with open(fname, "w") as fp:
        fp.write("this is test")
    if not os.path.isfile(fname):
        # the original called isfile here and dropped the result;
        # a freshly created file must be visible
        return False
    os.remove(fname)
    try:
        assert os.path.isfile(fname) is False
    except Exception as e:
        print(e)
        return False
    return True
def test_047_ttyname():  # issue #47 {{{1
    """issue #47: os.ttyname on stdin/stdout must not blow up the build.

    Returns False (after printing the error) when either call raises,
    e.g. when the fd is not attached to a terminal.
    """
    import os
    try:
        for fd in (0, 1):
            os.ttyname(fd)
    except Exception as e:
        print(e)
        return False
    return True
def test_071_anydbm():  # issue #71 {{{1
    """issue #71: a dbm database must be creatable on FAT (SD card) storage.

    Deletes os.chmod first — presumably to force the module's internal
    _chmod fallback, which the original comments say failed on FAT.
    """
    import os
    if sys.version_info[0] == 2:
        import anydbm
    else:
        import dbm as anydbm
    # FIXME: determine path to sdcard. like: path = os.environ[""]
    del os.chmod
    # only the SD-card location is exercised; app-private and /data/local
    # paths were tried historically and dropped
    targets = (os.path.join("/sdcard", "sl4a", "test_anydbm.dbm"),)
    for fname in targets:
        try:
            os.remove(fname + ".dat")
        except:
            pass
        anydbm.open(fname, "n")
        os.remove(fname + ".dat")
    return True
def test_075_httpserver():  # issue #75 {{{1
    """issue #75: BaseHTTPServer must be able to serve a page on-device.

    Starts an HTTP server on port 9090 and, unless GUI tests are skipped,
    opens it in the Android browser for three seconds.
    """
    import time
    import threading
    if sys.version_info[0] == 2:
        from BaseHTTPServer import BaseHTTPRequestHandler as handler
        from BaseHTTPServer import HTTPServer
    else:
        from http.server import BaseHTTPRequestHandler as handler
        from http.server import HTTPServer
    fname = "/sdcard/sl4a/test_075.html"
    port = 9090
    class Handler(handler):
        def do_GET(s):
            # write the fixture page, then stream it back to the client
            open(fname, "w").write("""
<html><head></head><body>fine 075</body></html>""")
            html = open(fname, 'rb')
            s.send_response(200)
            s.send_header("Content-Type", "text/html")
            s.end_headers()
            s.wfile.write(html.read())
    server_class = HTTPServer
    httpd = server_class(('', port), Handler)
    if not skip_gui:
        # and manual test has passed, open http://127.0.0.1:9090 in browser.
        th = threading.Thread(target=httpd.serve_forever)
        th.start()
        droid.startActivity('android.intent.action.VIEW',
                            'http://127.0.0.1:9090/')
        time.sleep(3)
        # shutdown() is only safe once serve_forever() is running; calling
        # it without the thread (the original did) blocks forever waiting
        # on the never-set __is_shut_down event
        httpd.shutdown()
    # release the listening socket either way (was leaked before)
    httpd.server_close()
    return True
def test_106_https_certification_failed():  # issue #106 {{{1
    """issue #106: an HTTPS fetch must succeed once SSL_CERT_FILE points
    at the bundled CA certificates extracted by take_cacert_pem."""
    if sys.version_info[0] == 2:
        import urllib2
    else:
        from urllib import request as urllib2
    import os
    import take_cacert_pem
    fname = take_cacert_pem.main()
    if not fname:
        return False
    os.environ["SSL_CERT_FILE"] = fname
    url = "https://ccc.de/"
    # url = "https://www.openssl.org/"
    req = urllib2.Request(url)
    # any response without a certificate-verification error is a pass
    urllib2.urlopen(req).read()
    return True
def test_107_large_file_report():  # issue #107 {{{1
    """issue #107: os.path.getsize must report sizes beyond 4 GiB correctly.

    Writes one byte just below and just above the 2**32 boundary (sparse
    files, so no real disk usage) and compares getsize with the expected
    length.  Returns True when both sizes match.
    """
    import os
    errors = []
    fname = "sample.bin"
    for n in (4294967294, 4294967297):
        try:
            # 'with' guarantees the handle is closed even if seek/write
            # fails; the original leaked the handle on error
            with open(fname, "wb") as fp:
                fp.seek(n)
                fp.write("1".encode("utf-8"))
            ans = os.path.getsize(fname)
            if ans != (n + 1):
                errors.append("%s(answer) vs %s(expected)" % (ans, n + 1))
        finally:
            # never leave a multi-gigabyte file behind
            if os.path.exists(fname):
                os.remove(fname)
    if not errors:
        return True
    print("can't get size collectly with %s" % str(errors))
    return False
def test_013s_scanBarcode():  # issue sl4a #13 {{{1
    """issue sl4a #13: a barcode scan must return both the decoded text
    (SCAN_RESULT) and the raw bytes (SCAN_RESULT_BYTES)."""
    if not skip_gui:
        code = droid.scanBarcode()
        ext = code.result.get('extras', None)
        if ext is None:
            return False
        if 'SCAN_RESULT_BYTES' not in ext or 'SCAN_RESULT' not in ext:
            print("no results:" + str(ext))
            return False
        bts = ext['SCAN_RESULT_BYTES']
        msg = ext['SCAN_RESULT']
        # print moved inside the guard: with skip_gui set, msg/bts were
        # unbound here and the original raised NameError
        print(msg, bts, len(bts))
    return True
def test_009s_airplanemode():  # issue sl4a #9 {{{1
    """issue sl4a #9: check and toggle airplane mode.

    The original note says this caused a null pointer exception on
    Android 4.4 and later.
    """
    state = droid.checkAirplaneMode()
    if state.error:
        return False
    if fOutName:
        print("%s" % state.result, end="")
    return not droid.toggleAirplaneMode(True).error
def test_032s_wificonnect():  # issue sl4a #32 {{{1
    """issue sl4a #32: wifiConnect must accept a configuration dict.

    Only the WPA2 branch is live; the other shapes are kept for
    reference/manual testing.
    """
    method = "WPA2"
    if method == "no-security":
        # optional keys (BSSID, hiddenSSID, priority, apBand) omitted
        cfg = {"SSID": "invalidwifi"}
    elif method == "WEP":
        cfg = {"SSID": "invalidwifi",
               "wepKeys": ["key0"],
               "wepTxKeyIndex": 0}
    else:  # WPA2
        # or password="<PASSWORD>"; note SL4A can't take a 64-byte key
        cfg = {"SSID": "invalidwifi",
               "preSharedKey": "<KEY>"}
    droid.wifiConnect(cfg)
    return True
# tests for some facade {{{1
def event_loop():
    """Poll the SL4A event queue up to ten times; True once an event arrives."""
    attempts = 0
    while attempts < 10:
        time.sleep(1)
        droid.eventClearBuffer()
        time.sleep(1)
        if droid.eventPoll(1).result is not None:
            return True
        attempts += 1
    return False
def test_imports():
    """True when the bundled third-party and native modules all import."""
    ok = True
    try:
        import termios
        import bs4 as BeautifulSoup
        import pyxmpp2 as xmpp
        from xml.dom import minidom
    except ImportError:
        ok = False
    return ok
def test_clipboard():
    """Round-trip a message through the clipboard, restoring the old content."""
    saved = droid.getClipboard().result
    probe = 'Hello, world!'
    droid.setClipboard(probe)
    got = droid.getClipboard().result
    droid.setClipboard(saved)
    return got == probe
def test_gdata():
    """Log into Google Docs with user-supplied credentials and verify the
    document list feed is non-empty.  False when gdata is unavailable or
    login fails; True without asking when GUI tests are skipped."""
    try:
        import gdata.docs.service
        global skip_gui
        if skip_gui:
            return True
    except:
        return False
    # Client that talks to the Google Docs server.
    client = gdata.docs.service.DocsService()
    username = droid.dialogGetInput('Username').result
    password = droid.dialogGetPassword('Password', 'For ' + username).result
    try:
        client.ClientLogin(username, password)
    except:
        return False
    feed = client.GetDocumentListFeed()
    return bool(list(feed.entry))
def test_gps():
    """Start location updates and wait for at least one location event."""
    droid.startLocating()
    try:
        found = event_loop()
    finally:
        droid.stopLocating()
    return found
def test_battery():
    """Monitor the battery briefly and check a status can be read."""
    droid.batteryStartMonitoring()
    time.sleep(1)
    try:
        status = droid.batteryGetStatus()
    finally:
        droid.batteryStopMonitoring()
    return bool(status)
def test_sensors():
    """Start timed sensing and wait for a sensor event to arrive."""
    resp = droid.startSensingTimed(1, 20)
    if resp.error:
        return False
    try:
        got = event_loop()
    finally:
        droid.stopSensing()
    return got
def test_speak():
    """Speak a greeting via TTS; passes when the facade reports no error."""
    return droid.ttsSpeak('Hello, world!').error is None
def test_phone_state():
    """Track phone state and wait for a state-change event."""
    droid.startTrackingPhoneState()
    try:
        got = event_loop()
    finally:
        droid.stopTrackingPhoneState()
    return got
def test_ringer_silent():
    """Toggle silent mode twice (ending where we started); both must succeed."""
    first = droid.toggleRingerSilentMode()
    second = droid.toggleRingerSilentMode()
    return first.error is None and second.error is None
def test_ringer_volume():
    """Read the ringer volume, drop it to 0, then restore the saved level."""
    saved = droid.getRingerVolume()
    if saved.error is not None:
        return False
    droid.setRingerVolume(0)
    restored = droid.setRingerVolume(saved.result)
    return restored.error is None
def test_get_last_known_location():
    """Query the last known location; only checks the call succeeds."""
    return droid.getLastKnownLocation().error is None
def test_geocode():
    """Reverse-geocode (0.0, 0.0); only checks the call succeeds."""
    return droid.geocode(0.0, 0.0, 1).error is None
def test_wifi():
    """Toggle wifi twice (ending where we started); both calls must succeed."""
    first = droid.toggleWifiState()
    second = droid.toggleWifiState()
    return first.error is None and second.error is None
def test_make_toast():
    """Show a toast notification; passes when the call reports no error."""
    return droid.makeToast('Hello, world!').error is None
def test_vibrate():
    """Trigger the vibrator; passes when the call reports no error."""
    return droid.vibrate().error is None
def test_notify():
    """Post a status-bar notification; passes when the call reports no error."""
    return droid.notify('Test Title', 'Hello, world!').error is None
def test_get_running_packages():
    """List running packages; only checks the call succeeds."""
    return droid.getRunningPackages().error is None
# tests for USBSerialFacade {{{1
def test_usb():  # {{{2
    """List attached USB serial devices; prints the listing on success."""
    listing = droid.usbserialDeviceList()
    if listing.error is not None:
        return False
    # NOTE(review): other facades read .result; confirm .data is correct here
    print(listing.data)
    return True
# tests for SL4A GUI parts {{{1
def test_alert_dialog():  # {{{2
    """Show a single-button welcome alert; succeeds once the user answers.

    Returns None (skipped) when GUI tests are disabled.
    """
    global skip_gui
    if skip_gui:
        return None
    droid.dialogCreateAlert('User Interface',
                            'Welcome to the SL4A integration test.')
    droid.dialogSetPositiveButtonText('Continue')
    droid.dialogShow()
    droid.dialogGetResponse().result  # block until the user responds
    return True
def test__alert_dialog_with_buttons():  # {{{2
    """Three-button alert; pressing the negative button ('No') disables
    the remaining GUI tests via skip_gui."""
    global skip_gui
    if skip_gui:
        return None
    droid.dialogCreateAlert(
        'Alert',
        'This alert box has 3 buttons and '
        'will wait for you to press one.')
    droid.dialogSetPositiveButtonText('Yes')
    droid.dialogSetNegativeButtonText('No')
    droid.dialogSetNeutralButtonText('Cancel')
    droid.dialogShow()
    pressed = droid.dialogGetResponse().result
    assert pressed['which'] in ('positive', 'negative', 'neutral')
    skip_gui = pressed['which'] == "negative"
    return True
def test_spinner_progress():  # {{{2
    """Show an indeterminate spinner for two seconds, then dismiss it."""
    droid.dialogCreateSpinnerProgress('Spinner',
                                      'This is simple spinner progress.')
    droid.dialogShow()
    time.sleep(2)
    droid.dialogDismiss()
    return True
def test_horizontal_progress():  # {{{2
    """Drive a determinate progress bar through 15 steps, then dismiss it."""
    droid.dialogCreateHorizontalProgress('Horizontal',
                                         'This is simple horizontal progress.',
                                         50)
    droid.dialogShow()
    step = 0
    while step < 15:
        time.sleep(0.1)
        droid.dialogSetCurrentProgress(step)
        step += 1
    droid.dialogDismiss()
    return True
def test__alert_dialog_with_list():  # {{{2
    """Alert with a plain item list; choosing item 1 ('bar') disables the
    remaining GUI tests via skip_gui."""
    global skip_gui
    if skip_gui:
        return None
    droid.dialogCreateAlert('Alert')
    droid.dialogSetItems(['foo', 'bar', 'baz'])
    droid.dialogShow()
    picked = droid.dialogGetResponse().result
    skip_gui = picked.item == 1
    return True
def test__alert_dialog_with_single_choice_list():  # {{{2
    """Single-choice list; selecting item 1 ('Skip') disables the
    remaining GUI tests via skip_gui."""
    global skip_gui
    if skip_gui:
        return None
    droid.dialogCreateAlert('GUI Test?')
    droid.dialogSetSingleChoiceItems(['Continue', 'Skip', 'baz'])
    droid.dialogSetPositiveButtonText('Yay!')
    droid.dialogShow()
    droid.dialogGetResponse().result  # wait for the dialog to close
    chosen = droid.dialogGetSelectedItems().result
    skip_gui = 1 in chosen
    return True
def test__alert_dialog_with_multi_choice_list():  # {{{2
    """Multi-choice list; ticking item 1 ('bar') disables the remaining
    GUI tests via skip_gui."""
    global skip_gui
    if skip_gui:
        return None
    droid.dialogCreateAlert('Alert')
    droid.dialogSetMultiChoiceItems(['foo', 'bar', 'baz'], [])
    droid.dialogSetPositiveButtonText('Yay!')
    droid.dialogShow()
    droid.dialogGetResponse().result  # wait for the dialog to close
    chosen = droid.dialogGetSelectedItems().result
    skip_gui = 1 in chosen
    return True
# tests for native module {{{1
def test_ssl():
    """Check that the ssl extension module was built and imports."""
    try:
        import ssl  # noqa: F401 -- availability is all we test for now
    except ImportError:  # narrowed from bare except (was hiding SystemExit etc.)
        return False
    return True
def test_ctypes():
    """Check that ctypes imports (r17-22 builds used to segfault here)."""
    try:
        import ctypes  # noqa: F401 -- availability is all we test for now
    except ImportError:  # narrowed from bare except
        return False
    return True
def test_readline():
    """Check that the readline extension module imports."""
    try:
        import readline  # noqa: F401 -- availability is all we test for now
    except ImportError:  # narrowed from bare except
        return False
    return True
def test0_curses():  # {{{2
    """Initialize and tear down _curses against the bundled terminfo db.

    Returns False instead of raising when the module is missing or the
    terminal cannot be set up, so a broken environment doesn't abort the
    whole test run (previously initscr() errors propagated uncaught).
    """
    import os
    if not os.environ.get("TERM", ""):
        os.environ["TERM"] = "vt100"
    os.environ["TERMINFO"] = ("/data/data/com.googlecode.pythonforandroid"
                              "/files/python/share/terminfo")
    try:
        import _curses
    except ImportError:  # narrowed from bare except
        return False
    try:
        _curses.initscr()
        _curses.endwin()
    except _curses.error as e:  # e.g. terminfo database not found
        print(e)
        return False
    return True
def test_termios():
    """Check that the termios extension module imports (POSIX only)."""
    try:
        import termios  # noqa: F401 -- availability is all we test for now
    except ImportError:  # narrowed from bare except
        return False
    return True
def test_bz2():
    """Check that the bz2 extension module imports."""
    try:
        import bz2  # noqa: F401 -- availability is all we test for now
    except ImportError:  # narrowed from bare except
        return False
    return True
def test_expat():
    """Check that the pyexpat extension module imports."""
    try:
        import pyexpat  # noqa: F401 -- availability is all we test for now
    except ImportError:  # narrowed from bare except
        return False
    return True
def test_sqlite3():
    """Check that the sqlite3 module (and its _sqlite3 extension) imports."""
    try:
        import sqlite3  # noqa: F401 -- availability is all we test for now
    except ImportError:  # narrowed from bare except
        return False
    return True
# tests for pure python module {{{1
def test_bs():
try:
import BeautifulSoup
except:
return False
# TODO: make test method
BeautifulSoup
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.