input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
ParallelConv(nn.Module):
"""Layer of parallel convolutions with varying filter sizes followed by max over time pooling
This module takes an input tensor of any orientation based on its constructor, and pools its
output to shape `[B, H]`, where `H` is `outsz * len(filtsz)`
"""
def __init__(self, insz: int, outsz: int, filtsz: List[int], activation: str = "relu", input_fmt: str = "bth"):
    """
    Constructor for a parallel convolution from any orientation tensor input

    :param insz: The number of input feature maps
    :param outsz: The number of output feature maps (an int shared by all filters, or one per filter)
    :param filtsz: The kernel size as a list of parallel filters to apply, e.g. `[3, 4, 5]`
    :param activation: An activation function by name to apply
    :param input_fmt: A string for the orientation. Valid values are `bth` or `btc` meaning hidden units last,
    `bht` or `bct` meaning the temporal dim last or `tbh` or `tbc` meaning the hidden units last and the temporal dim
    first
    """
    super().__init__()
    self.requires_length = False
    self.input_fmt = input_fmt.lower()
    # A single int means the same number of output maps for every filter size.
    # (isinstance replaces the non-idiomatic ``type(outsz) == int`` check.)
    if isinstance(outsz, int):
        outsz_filts = len(filtsz) * [outsz]
    else:
        outsz_filts = outsz
    self.output_dim = sum(outsz_filts)
    convs = []
    for i, fsz in enumerate(filtsz):
        if fsz % 2 == 0:
            # Even kernels cannot be symmetrically 'same'-padded by Conv1d;
            # delegate to the Conv1DSame helper.
            conv = Conv1DSame(insz, outsz_filts[i], fsz)
        else:
            conv = nn.Conv1d(insz, outsz_filts[i], fsz, padding=fsz // 2)
        convs.append(nn.Sequential(conv, get_activation(activation)))
    # Register via ModuleList so the parameters are tracked by the module.
    self.convs = nn.ModuleList(convs)
def transform_input(self, t: torch.Tensor) -> torch.Tensor:
if self.input_fmt == "bth" or self.input_fmt == "btc":
return bth2bht(t)
elif self.input_fmt == "tbh" or self.input_fmt == "tbc":
return tbh2bht(t)
else:
return t
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
"""Transform the input to `[B, C, T]` from any orientation and perform parallel 1D convs and max over time pool
:param inputs: An input tensor of any format specified in the constructor
:return: A `[B, H]` tensor representing the pooled outputs
"""
mots = []
input_bct = self.transform_input(inputs)
for conv in self.convs:
# In Conv1d, data BxCxT, max over time
conv_out = conv(input_bct)
mot, _ = conv_out.max(2)
mots.append(mot)
mots = torch.cat(mots, 1)
return mots # self.conv_drop(mots)
class Highway(nn.Module):
    """Highway layer as defined in https://arxiv.org/abs/1505.00387"""

    def __init__(self, input_size: int, **kwargs):
        """Highway layer constructor

        :param input_size: The input hidden size
        :param kwargs: unused, accepted for interface compatibility
        """
        super().__init__()
        self.proj = nn.Linear(input_size, input_size)
        self.transform = nn.Linear(input_size, input_size)
        # Negative gate bias biases the layer towards carrying the input
        # through unchanged at the start of training.
        self.transform.bias.data.fill_(-2.0)
        self.output_dim = input_size

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Take a tensor in and produce the highway layer output

        :param input: Input tensor
        :return: output tensor
        """
        transformed = torch.relu(self.proj(input))
        gate = torch.sigmoid(self.transform(input))
        return gate * transformed + (1 - gate) * input
def pytorch_linear(in_sz: int, out_sz: int, unif: float = 0, initializer: str = None, bias: bool = True):
    """Utility function that wraps a linear (AKA dense) layer creation, with options for weight init and bias

    :param in_sz: number of input features
    :param out_sz: number of output features
    :param unif: if > 0, init weights uniformly in [-unif, unif] (takes precedence over `initializer`)
    :param initializer: `ortho`, `he`/`kaiming`, or anything else for xavier/glorot
    :param bias: whether to include (zero-initialized) bias
    """
    l = nn.Linear(in_sz, out_sz, bias=bias)
    if unif > 0:
        l.weight.data.uniform_(-unif, unif)
    elif initializer == "ortho":
        # Fixed: use the in-place ``orthogonal_`` — the underscore-less form is
        # deprecated (consistent with xavier_uniform_ below).
        nn.init.orthogonal_(l.weight)
    elif initializer == "he" or initializer == "kaiming":
        # Fixed: deprecated ``kaiming_uniform`` -> ``kaiming_uniform_``.
        nn.init.kaiming_uniform_(l.weight)
    else:
        nn.init.xavier_uniform_(l.weight)
    if bias:
        l.bias.data.zero_()
    return l
class StackedLSTMCell(nn.Module):
    """A stack of LSTM cells applied at a single timestep."""

    def __init__(self, num_layers: int, input_size: int, rnn_size: int, dropout: float):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        self.num_layers = num_layers
        cells = []
        sz = input_size
        for _ in range(num_layers):
            cells.append(nn.LSTMCell(input_size=sz, hidden_size=rnn_size, bias=False))
            # Each subsequent layer consumes the previous layer's hidden state.
            sz = rnn_size
        self.layers = nn.ModuleList(cells)

    def forward(self, input: torch.Tensor, hidden: torch.Tensor):
        """Apply a stack of LSTMs

        :param input: The input to the first LSTM `[B, H]`
        :param hidden: The previous `(h, c)` where `h=(h_0, h_1,..)`, `c=(c_0, c_1,..)`
        :return: The output and hidden `(h, c)` where `h=(h_0, h_1,..)`, `c=(c_0, c_1,..)`
        """
        h_prev, c_prev = hidden
        new_h, new_c = [], []
        x = input
        for idx, cell in enumerate(self.layers):
            h_i, c_i = cell(x, (h_prev[idx], c_prev[idx]))
            # Dropout between layers only — the final layer's output is clean.
            x = h_i if idx == self.num_layers - 1 else self.dropout(h_i)
            new_h.append(h_i)
            new_c.append(c_i)
        return x, (torch.stack(new_h), torch.stack(new_c))
class StackedGRUCell(nn.Module):
    """A stack of GRU cells applied at a single timestep."""

    def __init__(self, num_layers: int, input_size: int, rnn_size: int, dropout: float):
        """
        :param num_layers: number of stacked GRU cells
        :param input_size: input feature size of the first cell
        :param rnn_size: hidden size of every cell
        :param dropout: dropout probability applied between layers
        """
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        self.num_layers = num_layers
        self.layers = nn.ModuleList()
        for i in range(num_layers):
            self.layers.append(nn.GRUCell(input_size=input_size, hidden_size=rnn_size))
            input_size = rnn_size

    def forward(self, input: torch.Tensor, hidden: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Apply a stack of GRUs

        :param input: The input to the first GRU `[B, H]`
        :param hidden: The previous `h` where `h=(h_0, h_1,..)`
        :return: The output and hidden `h` where `h=(h_0, h_1,..)`
        """
        h_0 = hidden
        hs = []
        for i, layer in enumerate(self.layers):
            h_i = layer(input, h_0[i])
            input = h_i
            # Fixed off-by-one: the previous ``i != self.num_layers`` test was
            # always true, so dropout was also applied to the last layer's
            # output; StackedLSTMCell correctly skips the final layer.
            if i != self.num_layers - 1:
                input = self.dropout(input)
            hs.append(h_i)
        hs = torch.stack(hs)
        return input, hs
class Dense(nn.Module):
    """Dense (Linear) layer with optional activation given

    This module is the equivalent of the tf.keras.layer.Dense, module with optional activations applied
    """

    def __init__(
        self,
        insz: int,
        outsz: int,
        activation: Optional[str] = None,
        unif: float = 0,
        initializer: Optional[str] = None,
    ):
        """Constructor for "dense" or "linear" layer, with optional activation applied

        :param insz: The number of hidden units in the input
        :param outsz: The number of hidden units in the output
        :param activation: The activation function by name, defaults to `None`, meaning no activation is applied
        :param unif: An optional initialization value which can set the linear weights. If given, biases will init to 0
        :param initializer: An initialization scheme by string name: `ortho`, `kaiming` or `he`, `xavier` or `glorot`
        """
        super().__init__()
        self.layer = pytorch_linear(insz, outsz, unif, initializer)
        self.activation = get_activation(activation)
        self.output_dim = outsz

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Run a linear projection over the input, followed by an optional activation given by constructor

        :param input: the input tensor
        :return: the transformed output
        """
        projected = self.layer(input)
        return self.activation(projected)
class WeightTieDense(nn.Module):
    """Do weight tying from the input parameter

    This module never copies the weight pointer, it lazily accesses to allow the tied variable to reset its parameters
    after initialization. This is helpful for cases where we have LMs and are reloading them after they have been
    initially created
    """

    def __init__(self, tie: nn.Module, bias=False):
        """
        :param tie: module whose weight (or embedding weight) is shared with this projection
        :param bias: if True, add a fresh zero-initialized bias parameter
        """
        super().__init__()
        self.tie = tie
        self.transform = self._get_transform(tie)
        if bias:
            # Fixed: the bias length is the output dim of the (possibly
            # transposed) tied weight, i.e. transform(weight).shape[0].  The
            # previous code called transform() on the integer shape[0], which
            # crashes for the transpose path.
            bias = torch.nn.Parameter(torch.zeros(self.transform(self.weight).shape[0]))
        else:
            bias = None
        self.register_parameter("bias", bias)

    def _get_transform(self, tie: nn.Module):
        # Embedding-style modules already store the weight in the orientation
        # F.linear expects; a plain Linear weight must be transposed.
        emb = getattr(tie, "embeddings", None)
        if emb is not None:
            return self._identity
        return self._transpose

    @property
    def weight(self):
        # Lazy lookup so the tie stays valid if the tied module resets params.
        emb = getattr(self.tie, "embeddings", None)
        if emb is not None:
            return getattr(emb, "weight")
        return getattr(self.tie, "weight")

    def _identity(self, x: torch.Tensor) -> torch.Tensor:
        return x

    def _transpose(self, x: torch.Tensor) -> torch.Tensor:
        return x.transpose(0, 1).contiguous()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return F.linear(input, self.transform(self.weight), self.bias)
class ResidualBlock(nn.Module):
    """Create a residual block by wrapping an layer with a residual connection"""

    def __init__(self, layer: Optional[nn.Module] = None, **kwargs):
        """Wrap an layer with a residual connection

        :param layer: This layer will be applied to the input and added to the input
        :param kwargs: unused, accepted for interface compatibility
        """
        super().__init__()
        self.layer = layer
        # Propagate the wrapped layer's output size when it advertises one.
        if self.layer is not None and hasattr(layer, "output_dim"):
            self.output_dim = layer.output_dim

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Apply a residual block

        :param input: A tensor to use as input and to add to output
        :return: The residual connection output
        """
        return self.layer(input) + input
class SkipConnection(ResidualBlock):
    """A ResidualBlock wrapping a same-sized Dense layer with a given activation."""

    def __init__(self, input_size: int, activation: str = "relu"):
        """Create a `SkipConnection`

        :param input_size: The input dimension size
        :param activation: A string activation name
        """
        # Parent is constructed empty; the Dense layer is attached afterwards.
        super().__init__(None)
        self.layer = Dense(input_size, input_size, activation=activation)
        self.output_dim = input_size
def rnn_cell(insz: int, hsz: int, rnntype: str, nlayers: int, dropout: float):
    """This is a wrapper function around a stacked RNN cell

    :param insz: The input dimensions
    :param hsz: The hidden dimensions
    :param rnntype: An RNN type `gru` or `lstm`
    :param nlayers: The number of layers to stack
    :param dropout: The amount of dropout
    :return: the stacked cell module
    """
    # Anything other than "gru" falls back to the LSTM stack.
    cell_cls = StackedGRUCell if rnntype == "gru" else StackedLSTMCell
    return cell_cls(nlayers, insz, hsz, dropout)
def pytorch_lstm(
insz: int,
hsz: int,
rnntype: str,
nlayers: int,
dropout: float,
unif: float = 0,
batch_first: bool = False,
initializer: str = None,
) -> torch.nn.LSTM:
"""Wrapper around `torch.nn.LSTM`, mainly for weight initialization options
:param insz: The input dimension
:param hsz: The number of hidden units
| |
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import copy
import numpy as np
import theano.tensor as tt
from scipy.linalg import cholesky
from scipy.special import logsumexp
from scipy.stats import multivariate_normal, median_abs_deviation
from scipy.optimize import minimize, approx_fprime
from theano import function as theano_function
import arviz as az
import jax
import jax.numpy as jnp
from jax.experimental import optimizers as jax_optimizers
import time
import pymc3 as pm
import pymc3.nfmc.posdef as posdef
from pymc3.tuning.scaling import find_hessian
from pymc3.tuning.starting import find_MAP
from pymc3.backends.ndarray import NDArray, point_list_to_multitrace
from pymc3.blocking import ArrayOrdering, DictToArrayBijection
from pymc3.model import Point, modelcontext, set_data
from pymc3.distributions.distribution import draw_values, to_tuple
from pymc3.sampling import sample_prior_predictive
from pymc3.theanof import (
floatX,
inputvars,
join_nonshared_inputs,
make_shared_replacements,
gradient,
hessian,
)
from pymc3.util import (
check_start_vals,
get_default_varnames,
get_var_name,
update_start_vals,
)
from pymc3.vartypes import discrete_types, typefilter
# SINF code for fitting the normalizing flow.
from pymc3.sinf.GIS import GIS
import torch
# This is a global variable used to store the optimization steps.
# Presumably there's a nicer way to do this.
param_store = []
class NFMC:
"""Sequential type normalizing flow based sampling/global approx."""
def __init__(
    self,
    draws=500,
    init_draws=500,
    resampling_draws=500,
    init_ess=100,
    sample_mode='reinit',
    cull_lowp_tol=0.05,
    model=None,
    init_method='prior',
    init_samples=None,
    start=None,
    init_EL2O='adam',
    use_hess_EL2O=False,
    mean_field_EL2O=False,
    absEL2O=1e-10,
    fracEL2O=1e-2,
    EL2O_draws=100,
    maxiter_EL2O=500,
    EL2O_optim_method='L-BFGS-B',
    scipy_map_method='L-BFGS-B',
    adam_lr=1e-3,
    adam_b1=0.9,
    adam_b2=0.999,
    adam_eps=1.0e-8,
    adam_steps=1000,
    simulator=None,
    model_data=None,
    sim_data_cov=None,
    sim_size=None,
    sim_params=None,
    sim_start=None,
    sim_optim_method='lbfgs',
    sim_tol=0.01,
    local_thresh=3,
    local_step_size=0.1,
    local_grad=True,
    init_local=True,
    nf_local_iter=0,
    max_line_search=100,
    random_seed=-1,
    chain=0,
    frac_validate=0.1,
    iteration=None,
    final_iteration=None,
    alpha=(0,0),
    final_alpha=(0.75,0.75),
    optim_iter=1000,
    ftol=2.220446049250313e-9,
    gtol=1.0e-5,
    k_trunc=0.25,
    verbose=False,
    n_component=None,
    interp_nbin=None,
    KDE=True,
    bw_factor_min=0.5,
    bw_factor_max=2.5,
    bw_factor_num=11,
    edge_bins=None,
    ndata_wT=None,
    MSWD_max_iter=None,
    NBfirstlayer=True,
    logit=False,
    Whiten=False,
    batchsize=None,
    nocuda=False,
    patch=False,
    shape=[28,28,1],  # NOTE(review): mutable default — safe only while never mutated
    redraw=True,
):
    """Store sampler configuration and resolve the pymc3 model context.

    A ``random_seed`` of -1 means "do not seed" (the sampler stays
    nondeterministic); any other value seeds numpy and torch.
    """
    self.draws = draws
    self.init_draws = init_draws
    self.resampling_draws = resampling_draws
    self.init_ess = init_ess
    self.sample_mode = sample_mode
    self.cull_lowp_tol = cull_lowp_tol
    self.model = model
    # Init method params.
    self.init_method = init_method
    self.init_samples = init_samples
    self.start = start
    self.init_EL2O = init_EL2O
    self.mean_field_EL2O = mean_field_EL2O
    self.use_hess_EL2O = use_hess_EL2O
    self.absEL2O = absEL2O
    self.fracEL2O = fracEL2O
    self.EL2O_draws = EL2O_draws
    self.maxiter_EL2O = maxiter_EL2O
    self.EL2O_optim_method = EL2O_optim_method
    self.scipy_map_method = scipy_map_method
    self.adam_lr = adam_lr
    self.adam_b1 = adam_b1
    self.adam_b2 = adam_b2
    self.adam_eps = adam_eps
    self.adam_steps = adam_steps
    self.simulator = simulator
    self.model_data = model_data
    self.sim_data_cov = sim_data_cov
    self.sim_size = sim_size
    self.sim_params = sim_params
    self.sim_start = sim_start
    self.sim_optim_method = sim_optim_method
    self.sim_tol = sim_tol
    # Local exploration params.
    self.local_thresh = local_thresh
    self.local_step_size = local_step_size
    self.local_grad = local_grad
    self.init_local = init_local
    self.nf_local_iter = nf_local_iter
    self.max_line_search = max_line_search
    self.random_seed = random_seed
    self.chain = chain
    # Set the numpy/torch seeds.
    # Fixed: this previously tested ``!= 1``, so the default sentinel -1 was
    # passed to np.random.seed, which raises ValueError. -1 means "no seed".
    if self.random_seed != -1:
        np.random.seed(self.random_seed)
        torch.manual_seed(self.random_seed)
    # Separating out so I can keep track. These are SINF params.
    assert 0.0 <= frac_validate <= 1.0
    self.frac_validate = frac_validate
    self.iteration = iteration
    self.final_iteration = final_iteration
    self.alpha = alpha
    self.final_alpha = final_alpha
    self.optim_iter = optim_iter
    self.ftol = ftol
    self.gtol = gtol
    self.k_trunc = k_trunc
    self.verbose = verbose
    self.n_component = n_component
    self.interp_nbin = interp_nbin
    self.KDE = KDE
    self.bw_factors = np.logspace(bw_factor_min, bw_factor_max, bw_factor_num)
    self.edge_bins = edge_bins
    self.ndata_wT = ndata_wT
    self.MSWD_max_iter = MSWD_max_iter
    self.NBfirstlayer = NBfirstlayer
    self.logit = logit
    self.Whiten = Whiten
    self.batchsize = batchsize
    self.nocuda = nocuda
    self.patch = patch
    self.shape = shape
    # Whether to redraw samples at every iteration; used for BO testing.
    self.redraw = redraw
    # Resolve the model from the pymc3 context stack (overrides the raw
    # ``model`` argument stored above).
    self.model = modelcontext(model)
    if self.random_seed != -1:
        np.random.seed(self.random_seed)
    self.variables = inputvars(self.model.vars)
def initialize_var_info(self):
"""Extract variable info for the model instance."""
var_info = OrderedDict()
init = self.model.test_point
for v in self.variables:
var_info[v.name] = (init[v.name].shape, init[v.name].size)
self.var_info = var_info
def initialize_population(self):
    """Create an initial population from the prior distribution.

    Populates ``prior_samples``, the importance weights / evidence estimates,
    and the pq-weighted diagnostics, then initializes ESS bookkeeping.
    """
    population = []
    if self.init_samples is None:
        # No user-provided samples: draw from the prior predictive.
        init_rnd = sample_prior_predictive(
            self.init_draws,
            var_names=[v.name for v in self.model.unobserved_RVs],
            model=self.model,
        )
        for i in range(self.init_draws):
            point = Point({v.name: init_rnd[v.name][i] for v in self.variables}, model=self.model)
            population.append(self.model.dict_to_array(point))
        self.prior_samples = np.array(floatX(population))
    elif self.init_samples is not None:
        self.prior_samples = np.copy(self.init_samples)
    self.weighted_samples = np.copy(self.prior_samples)
    self.nf_samples = np.copy(self.weighted_samples)
    self.get_posterior_logp()
    self.get_prior_logp()
    # Importance weights: log(target) - log(proposal), proposal = prior here.
    self.log_weight = self.posterior_logp - self.prior_logp
    # log of the mean (not sum) of the unnormalized weights.
    self.log_evidence = logsumexp(self.log_weight) - np.log(len(self.log_weight))
    self.evidence = np.exp(self.log_evidence)
    self.log_weight = self.log_weight - self.log_evidence
    self.regularize_weights()
    #same as in fitnf but prior~q
    self.log_weight_pq_num = self.posterior_logp + 2*self.prior_logp
    self.log_weight_pq_den = 3*self.prior_logp
    self.log_evidence_pq = logsumexp(self.log_weight_pq_num) - logsumexp(self.log_weight_pq_den)
    self.evidence_pq = np.exp(self.log_evidence_pq)
    #sum of mean loss (p - q*Z_pq)^2 /N for diagnostic purposes
    self.log_mean_loss = np.log(np.mean( ( np.exp(self.posterior_logp) - np.exp(self.prior_logp+self.log_evidence_pq) )**2 ))
    # NOTE(review): self.prior_logp is an ndarray (set by get_prior_logp), so
    # ``self.prior_logp(x)`` is not callable as written — confirm what
    # init_weights_cleanup (defined elsewhere) expects from these callbacks.
    self.init_weights_cleanup(lambda x: self.prior_logp(x), lambda x: self.prior_dlogp(x))
    self.q_ess = self.calculate_ess(self.log_weight)
    # NOTE(review): self.sinf_logw is not assigned in this chunk — presumably
    # set by init_weights_cleanup or elsewhere; verify before refactoring.
    self.total_ess = self.calculate_ess(self.sinf_logw)
    self.all_logq = np.array([])
    self.nf_models = []
def setup_logp(self):
    """Set up the prior and likelihood logp functions, and derivatives."""
    shared = make_shared_replacements(self.variables, self.model)

    def compiled(expr):
        # Compile a theano expression into a callable over the free variables.
        return logp_forw([expr], self.variables, shared)

    self.prior_logp_func = compiled(self.model.varlogpt)
    self.prior_dlogp_func = compiled(gradient(self.model.varlogpt, self.variables))
    self.likelihood_logp_func = compiled(self.model.datalogpt)
    self.posterior_logp_func = compiled(self.model.logpt)
    self.posterior_dlogp_func = compiled(gradient(self.model.logpt, self.variables))
    self.posterior_hessian_func = compiled(hessian(self.model.logpt, self.variables))
    # "nojac" variants evaluate the model logp without the transform Jacobian.
    self.posterior_logp_nojac = compiled(self.model.logp_nojact)
    self.posterior_dlogp_nojac = compiled(gradient(self.model.logp_nojact, self.variables))
    self.posterior_hessian_nojac = compiled(hessian(self.model.logp_nojact, self.variables))
def get_prior_logp(self):
"""Get the prior log probabilities."""
priors = [self.prior_logp_func(sample) for sample in self.nf_samples]
self.prior_logp = np.array(priors).squeeze()
def get_likelihood_logp(self):
"""Get the likelihood log probabilities."""
likelihoods = [self.likelihood_logp_func(sample) for sample in self.nf_samples]
self.likelihood_logp = np.array(likelihoods).squeeze()
def get_posterior_logp(self):
"""Get the posterior log probabilities."""
posteriors = [self.posterior_logp_func(sample) for sample in self.nf_samples]
self.posterior_logp = np.array(posteriors).squeeze()
def optim_target_logp(self, param_vals):
"""Optimization target function"""
return -1.0 * self.posterior_logp_func(param_vals)
def optim_target_dlogp(self, param_vals):
return -1.0 * self.posterior_dlogp_func(param_vals)
def optim_target_logp_nojac(self, param_vals):
"""Optimization target function"""
return -1.0 * self.posterior_logp_nojac(param_vals)
def optim_target_dlogp_nojac(self, param_vals):
return -1.0 * self.posterior_dlogp_nojac(param_vals)
def prior_dlogp(self, param_vals):
dlogps = [self.prior_dlogp_func(val) for val in param_vals]
return np.array(dlogps).squeeze()
def target_logp(self, param_vals):
logps = [self.posterior_logp_func(val) for val in param_vals]
return np.array(logps).squeeze()
def target_dlogp(self, param_vals):
dlogps = [self.posterior_dlogp_func(val) for val in param_vals]
return np.array(dlogps).squeeze()
def target_hessian(self, param_vals):
hessians = [self.posterior_hessian_func(val) for val in param_vals]
return np.array(hessians).squeeze()
def target_logp_nojac(self, param_vals):
logps = [self.posterior_logp_nojac(val) for val in param_vals]
return np.array(logps).squeeze()
def target_dlogp_nojac(self, param_vals):
dlogps = [self.posterior_dlogp_nojac(val) for val in param_vals]
return np.array(dlogps).squeeze()
def target_hessian_nojac(self, param_vals):
hessians = [self.posterior_hessian_nojac(val) for val in param_vals]
return np.array(hessians).squeeze()
def sinf_logq(self, param_vals):
if param_vals.size == 1:
param_vals = np.array([param_vals])
sinf_logq = self.nf_model.evaluate_density(torch.from_numpy(param_vals.astype(np.float32))).numpy().astype(np.float64)
return sinf_logq.item()
def sinf_dlogq(self, param_vals):
if param_vals.size == 1:
param_vals = np.array([param_vals])
sinf_dlogq = self.nf_model.score(torch.from_numpy(param_vals.astype(np.float32))).numpy().astype(np.float64)
return sinf_dlogq.squeeze()
def callback(self, xk):
self.optim_iter_samples = np.append(self.optim_iter_samples, np.array([xk]), axis=0)
def optimize(self, sample):
"""Optimize the prior samples"""
self.optim_iter_samples = np.array([sample])
minimize(self.optim_target_logp, x0=sample, method=self.scipy_map_method,
options={'maxiter': self.optim_iter, 'ftol': self.ftol, 'gtol': self.gtol},
jac=self.optim_target_dlogp, callback=self.callback)
return self.optim_iter_samples
def get_MAP(self, map_method='adam', map_start=None):
    """Get the MAP estimate.

    :param map_method: 'adam' uses the jax Adam optimizer loop below;
        anything else delegates to pymc3's find_MAP with scipy.
    :param map_start: starting point; defaults to ``self.start``.
    :return: dict mapping variable names to MAP values.
    """
    if map_start is None:
        map_start = self.start
    if map_method == 'adam':
        # NOTE(review): optimization_start, update_adam, adam_logp and bij are
        # defined outside this chunk — verify their contracts before changing.
        self.optimization_start()
        opt_init, opt_update, get_params = jax_optimizers.adam(step_size=self.adam_lr, b1=self.adam_b1,
                                                               b2=self.adam_b2, eps=self.adam_eps)
        opt_state = opt_init(map_start)
        for i in range(self.adam_steps):
            value, opt_state, update_params = self.update_adam(i, opt_state, opt_update, get_params)
            # Relative change of the target; stop early once below ftol.
            target_diff = np.abs((value - np.float64(self.adam_logp(floatX(update_params)))) /
                                 max(value, np.float64(self.adam_logp(floatX(update_params)))))
            if target_diff <= self.ftol:
                print(f'ADAM converged at step {i}')
                break
        vars = get_default_varnames(self.model.unobserved_RVs, include_transformed=True)
        # Map the flat optimized array back into named model variables.
        map_dict = {var.name: value for var, value in zip(vars, self.model.fastfn(vars)(self.bij.rmap(update_params.squeeze())))}
    else:
        map_dict = find_MAP(start=map_start, model=self.model, method=self.scipy_map_method)
    return map_dict
def regularize_weights(self):
"""Apply clipping to importance weights."""
inf_weights = np.isinf(np.exp(self.log_weight))
self.log_weight = np.clip(self.log_weight, a_min=None, a_max=logsumexp(self.log_weight[~inf_weights])
- np.log(len(self.log_weight[~inf_weights])) + self.k_trunc * np.log(len(self.log_weight)))
self.weights = np.exp(self.log_weight)
def regularize_weights_pq(self):
"""Apply clipping to pq importance weights."""
inf_weights = np.isinf(np.exp(self.log_weight_pq))
self.log_weight_pq = np.clip(self.log_weight_pq, a_min=None, a_max=logsumexp(self.log_weight_pq[~inf_weights])
- np.log(len(self.log_weight_pq[~inf_weights])) + self.k_trunc * np.log(len(self.log_weight_pq)))
self.weights_pq = np.exp(self.log_weight_pq)
def calculate_ess(self, logw):
"""Calculate ESS given a set of sample weights"""
logw = logw - logsumexp(logw)
ess = np.exp(-logsumexp(2 * logw) - np.log(logw.shape[0]))
return ess
def calculate_weight_variance(self):
"""Calculates the variance of importance weights for a given q."""
return np.var(self.weight)
def shrink_init(self, mu, sigma):
    """Shrinks a Gaussian initialization until we achieve the target ESS.

    Repeatedly halves the covariance, redraws ``init_draws`` samples from
    N(mu, sigma) and recomputes weights/evidence until
    ``q_ess * init_draws >= init_ess``.
    """
    # NOTE(review): this chunk appears to end at the loop below — confirm the
    # full source does not continue the method past this point.
    while self.q_ess * self.init_draws < self.init_ess:
        # NOTE(review): previous_q_ess is assigned but never used here.
        previous_q_ess = 1.0 * self.q_ess
        print(f'Shrinking intialization to improve ESS. Current ESS: {self.q_ess * self.init_draws}')
        # Halve the covariance and redraw the proposal population.
        sigma = sigma / 2
        self.weighted_samples = np.random.multivariate_normal(mu, sigma, size=self.init_draws)
        self.nf_samples = np.copy(self.weighted_samples)
        self.get_posterior_logp()
        # Importance weights against the shrunken Gaussian proposal.
        self.log_weight = self.posterior_logp - multivariate_normal.logpdf(self.nf_samples, mu.squeeze(), sigma, allow_singular=True)
        self.log_evidence = logsumexp(self.log_weight) - np.log(len(self.log_weight))
        self.evidence = np.exp(self.log_evidence)
        self.log_weight = self.log_weight - self.log_evidence
        #same as in fitnf but prior~q
        self.log_weight_pq_num = self.posterior_logp + 2 * multivariate_normal.logpdf(self.nf_samples, mu.squeeze(), sigma, allow_singular=True)
        self.log_weight_pq_den = 3*multivariate_normal.logpdf(self.nf_samples, mu.squeeze(), sigma, allow_singular=True)
        self.log_evidence_pq = logsumexp(self.log_weight_pq_num) - logsumexp(self.log_weight_pq_den)
        self.evidence_pq = np.exp(self.log_evidence_pq)
        self.regularize_weights()
        self.q_ess = self.calculate_ess(self.log_weight)
| |
"""
Methods for finding descendant entities (participants in families, biospecimens
in those participants, etc).
"""
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import psycopg2
import psycopg2.extras
from kf_utils.dataservice.patch import hide_entities, unhide_entities
from kf_utils.dataservice.scrape import yield_entities
def _accumulate(func, *args, **kwargs):
return list(func(*args, **kwargs))
# Maps of direct foreign key descendancy from studies down to genomic files
# {parent_endpoint: [(child_endpoint, link_on_parent, link_on_child), ...], ...}
# Table names here match the database schema (singular, underscored).
_db_descendancy = {
    "study": [
        ("participant", "kf_id", "study_id"),
        # We need to specially handle getting to families from studies, because
        # the database layout does not match the logical data arrangement, so
        # just add a stub here for family.
        ("family", None, None),
    ],
    "family": [("participant", "kf_id", "family_id")],
    "participant": [
        # Relationships are linked from either side of the pair.
        ("family_relationship", "kf_id", "participant1_id"),
        ("family_relationship", "kf_id", "participant2_id"),
        ("outcome", "kf_id", "participant_id"),
        ("phenotype", "kf_id", "participant_id"),
        ("diagnosis", "kf_id", "participant_id"),
        ("biospecimen", "kf_id", "participant_id"),
    ],
    "biospecimen": [
        ("biospecimen_genomic_file", "kf_id", "biospecimen_id"),
        ("biospecimen_diagnosis", "kf_id", "biospecimen_id"),
    ],
    "biospecimen_genomic_file": [("genomic_file", "genomic_file_id", "kf_id")],
    "genomic_file": [
        ("read_group_genomic_file", "kf_id", "genomic_file_id"),
        ("sequencing_experiment_genomic_file", "kf_id", "genomic_file_id"),
        ("biospecimen_genomic_file", "kf_id", "genomic_file_id"),
    ],
    "read_group_genomic_file": [("read_group", "read_group_id", "kf_id")],
    "sequencing_experiment_genomic_file": [
        ("sequencing_experiment", "sequencing_experiment_id", "kf_id")
    ],
}
# Same descendancy expressed in REST endpoint names (plural, hyphenated);
# the API hides the many-to-many link tables behind filter parameters.
_api_descendancy = {
    "studies": [
        ("participants", "kf_id", "study_id"),
        ("families", "kf_id", "study_id"),
    ],
    "families": [("participants", "kf_id", "family_id")],
    "participants": [
        ("family-relationships", "kf_id", "participant1_id"),
        ("family-relationships", "kf_id", "participant2_id"),
        ("outcomes", "kf_id", "participant_id"),
        ("phenotypes", "kf_id", "participant_id"),
        ("diagnoses", "kf_id", "participant_id"),
        ("biospecimens", "kf_id", "participant_id"),
    ],
    "biospecimens": [
        ("genomic-files", "kf_id", "biospecimen_id"),
        ("biospecimen-diagnoses", "kf_id", "biospecimen_id"),
    ],
    "genomic-files": [
        ("read-groups", "kf_id", "genomic_file_id"),
        ("read-group-genomic-files", "kf_id", "genomic_file_id"),
        ("sequencing-experiments", "kf_id", "genomic_file_id"),
        ("sequencing-experiment-genomic-files", "kf_id", "genomic_file_id"),
        ("biospecimen-genomic-files", "kf_id", "genomic_file_id"),
    ],
}
def find_gfs_with_extra_contributors(api_or_db_url, bs_kfids, gf_kfids=None):
    """
    Given a set of biospecimen KFIDs, find the KFIDs of descendant genomic
    files that also descend from biospecimens that aren't included in the given
    set. If you already know the full set of descendant genomic files, you may
    pass them in to save some time.

    Special performance note: a database connect url will run MUCH faster
    compared to a dataservice api host

    :param api_or_db_url: dataservice api host _or_ database connect url
    e.g. "https://kf-api-dataservice.kidsfirstdrc.org" or
    "postgres://<USERNAME>:<PASSWORD>@kf-dataservice-postgres-prd.kids-first.io:5432/kfpostgresprd"
    :param bs_kfids: iterable of biospecimen KFIDs
    :param gf_kfids: iterable of genomic file KFIDs (optional)
    :returns: sets of KFIDs of genomic files with contributing biospecimens not
    included in bs_kfids, divided into these groups:
    "all_visible": all extra contributors are visible in the dataservice
    "all_hidden": all extra contributors are hidden in the dataservice
    "mixed_visibility": some extra contributors are hidden and some not

    Example:
    If BS_12345678 and BS_87654321 both contribute to GF_11112222, but you only
    specify one of the two BSIDs, then GF_11112222 will be returned. If you
    specify both of them, then GF_11112222 will _not_ be returned. The exact
    nature of the return will depend on the visibility of the extra
    contributors.
    """
    # Fixed: both branches previously passed ``gf_kfids=None``, silently
    # discarding a caller-supplied genomic file set (and its time savings).
    if api_or_db_url.startswith(("http:", "https:")):
        return _find_gfs_with_extra_contributors_with_http_api(
            api_or_db_url, bs_kfids, gf_kfids=gf_kfids
        )
    else:
        return _find_gfs_with_extra_contributors_with_db_conn(
            api_or_db_url, bs_kfids, gf_kfids=gf_kfids
        )
def _find_gfs_with_extra_contributors_with_db_conn(
    db_url, bs_kfids, gf_kfids=None
):
    """See find_gfs_with_extra_contributors"""
    # Self-join the biospecimen<->genomic-file link table to find, for each
    # genomic file reached from bs_kfids, linked biospecimens outside the set.
    sql = (
        "select distinct extra.genomic_file_id, biospecimen.visible from"
        " biospecimen_genomic_file bg join biospecimen_genomic_file extra"
        " on bg.genomic_file_id = extra.genomic_file_id"
        " join biospecimen"
        " on biospecimen.kf_id = extra.biospecimen_id"
        " where bg.biospecimen_id in %s and extra.biospecimen_id not in %s"
    )
    bs_kfids = tuple(bs_kfids)
    params = [bs_kfids, bs_kfids]
    if gf_kfids:
        # Known descendant set: restrict the join to just those genomic files.
        sql += " and extra.genomic_file_id in %s"
        params.append(tuple(gf_kfids))
    has_extra_contributors = {
        "mixed_visibility": set(),
        "hidden": set(),
        "visible": set(),
    }
    visibility_by_gf = defaultdict(set)
    with psycopg2.connect(db_url) as conn:
        with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
            cur.execute(sql, tuple(params))
            for row in cur.fetchall():
                visibility_by_gf[row["genomic_file_id"]].add(row["visible"])
    # Classify each genomic file by the visibility of its extra contributors.
    for gfid, visset in visibility_by_gf.items():
        if (False in visset) and (True in visset):
            has_extra_contributors["mixed_visibility"].add(gfid)
        elif False in visset:
            has_extra_contributors["hidden"].add(gfid)
        else:
            has_extra_contributors["visible"].add(gfid)
    return has_extra_contributors
def _find_gfs_with_extra_contributors_with_http_api(
    api_url, bs_kfids, gf_kfids=None
):
    """See find_gfs_with_extra_contributors"""
    bs_kfids = set(bs_kfids)
    if not gf_kfids:
        # No genomic files given: discover them by scraping the
        # biospecimen-genomic-file links for each biospecimen, in parallel.
        gf_kfids = set()
        with ThreadPoolExecutor() as tpex:
            futures = [
                tpex.submit(
                    _accumulate,
                    yield_entities,
                    api_url,
                    "biospecimen-genomic-files",
                    {"biospecimen_id": k},
                    show_progress=True,
                )
                for k in bs_kfids
            ]
            for f in as_completed(futures):
                for bg in f.result():
                    # The genomic file KFID is the tail of its link URL.
                    gf_kfids.add(bg["_links"]["genomic_file"].rsplit("/", 1)[1])
    else:
        gf_kfids = set(gf_kfids)
    has_extra_contributors = {
        "mixed_visibility": set(),
        "hidden": set(),
        "visible": set(),
    }
    # For each genomic file, fetch all contributing biospecimens in parallel
    # and classify those that fall outside the given bs_kfids set.
    with ThreadPoolExecutor() as tpex:
        futures = {
            tpex.submit(
                _accumulate,
                yield_entities,
                api_url,
                "biospecimens",
                {"genomic_file_id": g},
                show_progress=True,
            ): g
            for g in gf_kfids
        }
        for f in as_completed(futures):
            g = futures[f]
            # Map contributor KFID -> visibility flag.
            contribs = {
                bs["kf_id"]: (bs["visible"] is True) for bs in f.result()
            }
            contrib_kfids = set(contribs.keys())
            if not contrib_kfids.issubset(bs_kfids):
                extra_kfids = contrib_kfids - bs_kfids
                extras_visible = set(contribs[k] for k in extra_kfids)
                if (False in extras_visible) and (True in extras_visible):
                    has_extra_contributors["mixed_visibility"].add(g)
                elif False in extras_visible:
                    has_extra_contributors["hidden"].add(g)
                else:
                    has_extra_contributors["visible"].add(g)
    return has_extra_contributors
def find_descendants_by_kfids(
api_or_db_url,
parent_endpoint,
parents,
ignore_gfs_with_hidden_external_contribs,
kfids_only=True,
):
"""
Given a set of KFIDs from a specified endpoint, find the KFIDs of all
descendant entities.
Given a family kfid, the result will be all participants in that family,
all of the participants' biospecimens/outcomes/phenotypes/etc, all of
their biospecimens' resultant genomic files, and all of the genomic files'
sequencing experiments and read groups.
Given a set of genomic file kfids, the result will be just their sequencing
experiments and read groups.
If you plan to make the discovered descendants visible, you should set
ignore_gfs_with_hidden_external_contribs=True so that you don't accidentally
unhide a genomic file that has hidden contributing biospecimens.
If you plan to make the discovered descendants hidden, you should set
ignore_gfs_with_hidden_external_contribs=False so that everything linked to
the hidden biospecimens also get hidden.
Special performance note: a database connect url will run MUCH faster
compared to a dataservice api host
:param api_or_db_url: dataservice api host _or_ database connect url
e.g. "https://kf-api-dataservice.kidsfirstdrc.org" or
"postgres://<USERNAME>:<PASSWORD>@kf-dataservice-postgres-prd.kids-first.io:5432/kfpostgresprd"
:param parent_endpoint: endpoint of the starting kfids being passed in
:param parents: iterable of starting kfids or entities associated with the
parent_endpoint
:param ignore_gfs_with_hidden_external_contribs: whether to ignore
genomic files (and their descendants) that contain information from
hidden biospecimens unrelated to the given parents.
:param kfids_only: only return KFIDs, not entire entities
:returns: dict mapping endpoints to their sets of discovered kfids
"""
use_api = api_or_db_url.startswith(("http:", "https:"))
if use_api:
parent_type = parent_endpoint
else:
endpoint_to_table = {
"studies": "study",
"participants": "participant",
"family-relationships": "family_relationship",
"outcomes": "outcome",
"phenotypes": "phenotype",
"diagnoses": "diagnosis",
"biospecimens": "biospecimen",
"families": "family",
"biospecimen-genomic-files": "biospecimen_genomic_file",
"biospecimen-diagnoses": "biospecimen_diagnosis",
"genomic-files": "genomic_file",
"read-group-genomic-files": "read_group_genomic_file",
"sequencing-experiment-genomic-files": "sequencing_experiment_genomic_file",
"read-groups": "read_group",
"sequencing-experiments": "sequencing_experiment",
}
table_to_endpoint = {v: k for k, v in endpoint_to_table.items()}
parent_type = endpoint_to_table[parent_endpoint]
if use_api:
descendancy = _api_descendancy
else:
descendancy = _db_descendancy
db_conn = psycopg2.connect(api_or_db_url)
db_cur = db_conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
if isinstance(parents, str):
parents = [parents]
if isinstance(next(iter(parents), None), dict):
parent_kfids = set(p["kf_id"] for p in parents)
descendants = {parent_type: {p["kf_id"]: p for p in parents}}
else:
parent_kfids = set(parents)
if use_api:
descendants = {
parent_type: {
e["kf_id"]: e
for e in yield_entities(api_or_db_url, None, parent_kfids)
}
}
else:
query = f"select distinct * from {parent_type} where kf_id in %s"
db_cur.execute(query, (tuple(parent_kfids | {None}),))
descendants = {
parent_type: {p["kf_id"]: dict(p) for p in db_cur.fetchall()}
}
done = set()
for t in descendancy.keys():
if t != parent_type:
done.add(t)
else:
break
def _inner(parent_type, parent_kfids, descendants):
if parent_type in done:
return
done.add(parent_type)
for (child_type, link_on_parent, link_on_child) in descendancy.get(
parent_type, []
):
if use_api:
with ThreadPoolExecutor() as tpex:
futures = [
tpex.submit(
_accumulate,
yield_entities,
api_or_db_url,
child_type,
{link_on_child: k},
show_progress=True,
)
for k in parent_kfids
]
children = {
e["kf_id"]: e
for f in as_completed(futures)
for e in f.result()
}
else:
# special case for getting to families from studies
if parent_type == "study" and child_type == "family":
query = (
"select distinct family.* from family join participant"
" on participant.family_id = family.kf_id join study on"
" participant.study_id = study.kf_id where study.kf_id "
"in %s"
)
else:
query = (
f"select distinct {child_type}.* from {child_type} join {parent_type}"
f" on {child_type}.{link_on_child} = {parent_type}.{link_on_parent}"
f" where {parent_type}.kf_id in %s"
)
db_cur.execute(query, (tuple(parent_kfids | {None}),))
children = {c["kf_id"]: dict(c) for c in db_cur.fetchall()}
if children:
descendants[child_type] = descendants.get(child_type, dict())
descendants[child_type].update(children)
if (
child_type == "genomic_file"
) and ignore_gfs_with_hidden_external_contribs:
# Ignore multi-specimen genomic files that have hidden
# contributing specimens which are not in the descendants
extra_contrib_gfs = find_gfs_with_extra_contributors(
api_or_db_url,
descendants["biospecimen"],
descendants["genomic_file"],
)
to_remove = (
extra_contrib_gfs["hidden"]
| extra_contrib_gfs["mixed_visibility"]
)
descendants["genomic_file"] = {
k: v
for k, v in descendants["genomic_file"].items()
if k not in to_remove
}
for (child_type, _, _) in descendancy.get(parent_type, []):
if descendants.get(child_type):
_inner(child_type, descendants[child_type].keys(), descendants)
_inner(parent_type, parent_kfids, descendants)
if not use_api:
descendants = {table_to_endpoint[k]: v for k, v in descendants.items()}
| |
<gh_stars>1000+
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from graph4nlp.pytorch.modules.utils.tree_utils import Tree, to_cuda
from .attention import Attention
from .base import RNNTreeDecoderBase
class StdTreeDecoder(RNNTreeDecoderBase):
r"""StdTreeDecoder: This is a tree decoder implementation, which is used for tree object decoding.
Attributes
----------
attn_type : str,
Describe which attention mechanism is used, can be ``uniform``,
``separate_on_encoder_type``, ``separate_on_node_type``.
embeddings : torch.nn.Module,
Embedding layer, input is tensor of word index, output is word embedding tensor.
enc_hidden_size : int,
Size of encoder hidden state.
dec_emb_size : int,
Size of decoder word embedding layer output size.
dec_hidden_size : int,
Size of decoder hidden state. (namely the ``lstm`` or ``gru``
hidden size when rnn unit has been specified)
output_size : int,
Size of output vocabulary size.
teacher_force_ratio : float,
The ratio of possibility to use teacher force training.
use_sibling : boolean,
Whether feed sibling state in each decoding step.
use_copy : boolean,
Whether use copy mechanism in decoding.
fuse_strategy: str, option=[None, "average", "concatenate"], default=None
The strategy to fuse attention results generated by separate attention.
"None": If we do ``uniform`` attention, we will set it to None.
"``average``": We will take an average on all results.
"``concatenate``": We will concatenate all results to one.
num_layers : int, optional,
Layer number of decoder rnn unit.
dropout_for_decoder: float,
Dropout ratio for decoder(include both the dropout for word embedding
and the dropout for attention layer)
tgt_vocab : object,
The vocab object used in decoder, including all the word<->id pairs
appeared in the output sentences.
graph_pooling_strategy : str,
The graph pooling strategy used to generate the graph embedding with node embeddings
rnn_type: str, optional,
The rnn unit is used, option=["lstm", "gru"], default="lstm".
max_dec_seq_length : int, optional,
In decoding, the decoding steps upper limit.
max_dec_tree_depth : int, optional,
        In decoding, the tree depth upper limit.
"""
def __init__(
self,
attn_type,
embeddings,
enc_hidden_size,
dec_emb_size,
dec_hidden_size,
output_size,
criterion,
teacher_force_ratio,
use_sibling=True,
use_attention=True,
use_copy=False,
fuse_strategy="average",
num_layers=1,
dropout_for_decoder=0.1,
rnn_type="lstm",
max_dec_seq_length=512,
max_dec_tree_depth=256,
tgt_vocab=None,
graph_pooling_strategy="max",
):
super(StdTreeDecoder, self).__init__(
use_attention=True,
use_copy=use_copy,
use_coverage=False,
attention_type="uniform",
fuse_strategy="average",
)
self.num_layers = num_layers
self.criterion = criterion
self.rnn_size = dec_hidden_size
self.enc_hidden_size = enc_hidden_size
self.hidden_size = dec_hidden_size
self.max_dec_seq_length = max_dec_seq_length
self.max_dec_tree_depth = max_dec_tree_depth
self.tgt_vocab = tgt_vocab
self.teacher_force_ratio = teacher_force_ratio
self.use_sibling = use_sibling
self.dec_emb_size = dec_emb_size
self.dropout_input = dropout_for_decoder
self.embeddings = embeddings
self.graph_pooling_strategy = graph_pooling_strategy
self.attn_state = {}
self.use_copy = use_copy
self.attention = Attention(
query_size=dec_hidden_size,
memory_size=enc_hidden_size * 2
if (enc_hidden_size * 2 == dec_hidden_size)
else enc_hidden_size,
hidden_size=dec_hidden_size,
has_bias=True,
dropout=dropout_for_decoder,
attention_funtion="dot",
)
self.separate_attn = attn_type != "uniform"
if self.separate_attn:
self.linear_att = nn.Linear(3 * dec_hidden_size, dec_hidden_size)
else:
self.linear_att = nn.Linear(2 * dec_hidden_size, dec_hidden_size)
self.linear_out = nn.Linear(dec_hidden_size, output_size)
self.dropout_attn = nn.Dropout(dropout_for_decoder)
self.logsoftmax = nn.LogSoftmax(dim=1)
if self.use_copy:
ptr_size = self.embeddings.embedding_dim
ptr_size += 4 * self.rnn_size
self.ptr = nn.Linear(ptr_size, 1)
self.rnn = self._build_rnn(
rnn_type=rnn_type,
input_size=output_size,
emb_size=dec_emb_size,
hidden_size=dec_hidden_size,
dropout_input=dropout_for_decoder,
use_sibling=use_sibling,
)
    def _run_forward_pass(
        self,
        graph_node_embedding,
        graph_node_mask,
        rnn_node_embedding,
        graph_level_embedding,
        graph_edge_embedding=None,
        graph_edge_mask=None,
        tgt_tree_batch=None,
        enc_batch=None,
        oov_dict=None,
    ):
        r"""
        The private calculation method for decoder: decodes the target trees
        level by level (breadth-first over tree nodes) and accumulates the
        training loss.

        Parameters
        ----------
        enc_batch : torch.Tensor,
            The input batch : (Batch_size * Source sentence word index tensor).
        tgt_tree_batch:
            The target tree to generate : consists of (Batch_size * Tree object),
            each node in a Tree object is either a word index or a children Tree object.
        graph_node_embedding: torch.Tensor,
            The graph node embedding matrix of shape :math:`(B, N, D_{in})`
        graph_node_mask: torch.Tensor,
            The graph node type mask matrix of shape :math`(B, N)`
        rnn_node_embedding: torch.Tensor,
            The rnn encoded embedding matrix of shape :math`(B, N, D_{in})`
        graph_level_embedding: torch.Tensor,
            graph level embedding of shape :math`(B, D_{in})`; if ``None`` it
            is derived from ``graph_node_embedding`` via the configured
            pooling strategy.
        graph_edge_embedding: torch.Tensor,
            graph edge embedding of shape :math`(B, N, D_{in})`
        graph_edge_mask: torch.Tensor,
            graph edge type embedding
        oov_dict: dict,
            vocab dict used in copy mechanism to incorporate some new words which
            have never appeared in vocab for input sentences in training set.

        Returns
        -------
        torch.Tensor
            Scalar loss averaged over the batch.
        """
        tgt_batch_size = len(tgt_tree_batch)
        enc_outputs = graph_node_embedding
        device = graph_node_embedding.device
        # No precomputed graph-level embedding: pool node embeddings over the
        # node dimension to initialize the decoder state.
        if graph_level_embedding is None:
            if self.graph_pooling_strategy == "max":
                graph_level_embedding = torch.max(graph_node_embedding, 1)[0]
            elif self.graph_pooling_strategy == "min":
                graph_level_embedding = torch.min(graph_node_embedding, 1)[0]
            elif self.graph_pooling_strategy == "mean":
                graph_level_embedding = torch.mean(graph_node_embedding, 1)
            else:
                raise NotImplementedError()
            # The same pooled vector seeds both LSTM cell and hidden state.
            graph_cell_state = graph_level_embedding
            graph_hidden_state = graph_level_embedding
        else:
            graph_cell_state, graph_hidden_state = graph_level_embedding
        # rnn_node_embedding = torch.zeros_like(graph_node_embedding,
        # requires_grad=False).to(device)
        cur_index = 1
        loss = 0
        # dec_batch: per tree-node token sequences; queue_tree: per-sample BFS
        # queue of tree nodes; max_index: number of tree nodes to decode.
        dec_batch, queue_tree, max_index = get_dec_batch(
            tgt_tree_batch, tgt_batch_size, device, self.tgt_vocab
        )
        # dec_state[tree_node][time_step][1|2] holds (cell, hidden) states:
        # key 1 = cell state, key 2 = hidden state.
        dec_state = {}
        for i in range(self.max_dec_tree_depth + 1):
            dec_state[i] = {}
            for j in range(self.max_dec_seq_length + 1):
                dec_state[i][j] = {}
        # Decode one tree node (i.e. one token sequence) per iteration,
        # stopping at the configured tree-depth cap.
        while cur_index <= max_index:
            if cur_index > self.max_dec_tree_depth:
                break
            for j in range(1, 3):
                dec_state[cur_index][0][j] = torch.zeros(
                    (tgt_batch_size, self.rnn_size), dtype=torch.float, requires_grad=False
                ).to(device)
            sibling_state = torch.zeros(
                (tgt_batch_size, self.rnn_size), dtype=torch.float, requires_grad=False
            ).to(device)
            # with torch.no_grad():
            if cur_index == 1:
                # Root node: initialize from the graph-level embedding.
                for i in range(tgt_batch_size):
                    dec_state[1][0][1][i, :] = graph_cell_state[i]
                    dec_state[1][0][2][i, :] = graph_hidden_state[i]
            else:
                # Non-root node: copy the parent's state at the time step
                # where this child was emitted (parent feeding).
                for i in range(1, tgt_batch_size + 1):
                    if cur_index <= len(queue_tree[i]):
                        par_index = queue_tree[i][cur_index - 1]["parent"]
                        child_index = queue_tree[i][cur_index - 1]["child_index"]
                        dec_state[cur_index][0][1][i - 1, :] = dec_state[par_index][child_index][1][
                            i - 1, :
                        ]
                        dec_state[cur_index][0][2][i - 1, :] = dec_state[par_index][child_index][2][
                            i - 1, :
                        ]
                    # Find the most recent earlier sibling (same parent,
                    # smaller child_index) for sibling feeding.
                    flag_sibling = False
                    for q_index in range(len(queue_tree[i])):
                        if (
                            (cur_index <= len(queue_tree[i]))
                            and (q_index < cur_index - 1)
                            and (
                                queue_tree[i][q_index]["parent"]
                                == queue_tree[i][cur_index - 1]["parent"]
                            )
                            and (
                                queue_tree[i][q_index]["child_index"]
                                < queue_tree[i][cur_index - 1]["child_index"]
                            )
                        ):
                            flag_sibling = True
                            sibling_index = q_index
                    if flag_sibling:
                        # NOTE(review): q_index is 0-based while dec_state /
                        # dec_batch are keyed by 1-based cur_index — confirm
                        # dec_state[sibling_index] is not off by one.
                        sibling_state[i - 1, :] = dec_state[sibling_index][
                            dec_batch[sibling_index].size(1) - 1
                        ][2][i - 1, :]
            parent_h = dec_state[cur_index][0][2]
            pred = None
            # Unroll this node's token sequence; position i predicts token i+1.
            for i in range(dec_batch[cur_index].size(1) - 1):
                teacher_force = random.random() < self.teacher_force_ratio
                # Without teacher forcing (after the first step), feed back
                # the previous prediction instead of the gold token.
                if teacher_force is not True and i > 0:
                    input_word = pred.argmax(1)
                else:
                    input_word = dec_batch[cur_index][:, i]
                # NOTE(review): sibling_state is computed above but not passed
                # here, so decode_step receives its default None — confirm
                # whether sibling feeding is intentionally disabled.
                pred, rnn_state_iter, attn_scores = self.decode_step(
                    tgt_batch_size=tgt_batch_size,
                    dec_single_input=input_word,
                    dec_single_state=(dec_state[cur_index][i][1], dec_state[cur_index][i][2]),
                    memory=enc_outputs,
                    parent_state=parent_h,
                    oov_dict=oov_dict,
                    enc_batch=enc_batch,
                )
                dec_state[cur_index][i + 1][1], dec_state[cur_index][i + 1][2] = rnn_state_iter
                # Epsilon guards log(0) since decode_step returns softmax probs.
                pred = torch.log(pred + 1e-31)
                loss += self.criterion(pred, dec_batch[cur_index][:, i + 1])
            cur_index = cur_index + 1
        loss = loss / tgt_batch_size
        return loss
def _filter_oov(self, tokens, vocab):
r"""The function used to mask some oov word in word embedding layer."""
ret = tokens.clone()
ret[tokens >= vocab.vocab_size] = vocab.get_symbol_idx(vocab.unk_token)
return ret
def decode_step(
self,
tgt_batch_size,
dec_single_input,
dec_single_state,
memory,
parent_state,
input_mask=None,
memory_mask=None,
memory_candidate=None,
sibling_state=None,
oov_dict=None,
enc_batch=None,
):
"""The decoding function in tree decoder.
Parameters
----------
tgt_batch_size : int,
batch size.
dec_single_input : torch.Tensor,
word id matrix for decoder input: [B, N].
dec_single_state : torch.Tensor
the rnn decoding hidden state: [B, N, D].
memory : torch.Tensor
the encoder output node embedding.
parent_state : torch.Tensor
the parent embedding used in parent feeding mechanism.
input_mask : torch.Tensor, optional
input mask, by default None
memory_mask : torch.Tensor, optional
mask for encoder output, by default None
memory_candidate : torch.Tensor, optional
encoder output used for separate attention mechanism, by default None
sibling_state : torch.Tensor, optional
sibling state for sibling feeding mechanism, by default None
oov_dict : object, optional
out-of-vocabulary object for copy mechanism, by default None
enc_batch : torch.Tensor,
The input batch : (Batch_size * Source sentence word index tensor).
"""
device = memory.device
dec_single_input = self._filter_oov(dec_single_input, self.tgt_vocab)
rnn_state_c, rnn_state_h, dec_emb = self.rnn(
dec_single_input, dec_single_state[0], dec_single_state[1], parent_state, sibling_state
)
attn_collect = []
score_collect = []
if self.separate_attn:
pass
else:
context_vector, attn_scores = self.attention(query=rnn_state_h, memory=memory)
attn_collect.append(context_vector)
score_collect.append(attn_scores)
pred = F.tanh(self.linear_att(torch.cat((context_vector, rnn_state_h), 1)))
decoder_output = self.linear_out(self.dropout_attn(pred))
if self.use_copy:
assert enc_batch is not None
assert oov_dict is not None
output = torch.zeros(tgt_batch_size, oov_dict.vocab_size).to(device)
attn_ptr = torch.cat(attn_collect, dim=-1)
pgen_collect = [dec_emb, torch.cat((rnn_state_c, rnn_state_h), -1), attn_ptr]
prob_ptr = torch.sigmoid(self.ptr(torch.cat(pgen_collect, -1)))
prob_gen = 1 - prob_ptr
gen_output = torch.softmax(decoder_output, dim=-1)
ret = prob_gen * gen_output
need_pad_length = len(oov_dict) - len(self.tgt_vocab)
output = torch.cat((ret, ret.new_zeros((tgt_batch_size, need_pad_length))), dim=1)
# output[:, :self.tgt_vocab.vocab_size] = ret
ptr_output = attn_scores
output.scatter_add_(1, enc_batch, prob_ptr * ptr_output)
decoder_output = output
# decoder_output = -F.threshold(-output, -1.0, -1.0)
else:
decoder_output = torch.softmax(decoder_output, dim=-1)
return decoder_output, (rnn_state_c, rnn_state_h), attn_scores
def _build_rnn(self, rnn_type, input_size, emb_size, hidden_size, dropout_input, use_sibling):
"""_build_rnn : how the rnn unit should be build."""
rnn = TreeDecodingUnit(
input_size, emb_size, hidden_size, dropout_input, use_sibling, self.embeddings
)
return rnn
def forward(self, g, tgt_tree_batch=None, oov_dict=None):
params = self._extract_params(g)
params["tgt_tree_batch"] = tgt_tree_batch
params["oov_dict"] = oov_dict
return self._run_forward_pass(**params)
def _extract_params(self, graph_list):
"""
Parameters
----------
g: GraphData
Returns
-------
params: dict
"""
batch_data_dict = graph_list.batch_node_features
| |
the cable leading in.
# May 23, 2014: I think this is redundant as this will get caught when
# looking at the cables feeding into this step.
# else:
# generator = pipelinestep.cables_in.get(dest=socket)
# return (curr_run, generator)
# Now check if it's an output from this step.
generator = pipelinestep
for socket in pipelinestep.transformation.outputs.order_by("dataset_idx"):
key = (curr_run, generator, socket)
if key in self.socket_map and self.socket_map[key] == dataset_to_find:
return (curr_run, generator)
# Finally, check if it's at the end of a nontrivial Pipeline output cable.
for outcable in pipeline.outcables.order_by("output_idx"):
socket = outcable.dest
key = (curr_run, outcable, socket)
if key in self.socket_map and self.socket_map[key] == dataset_to_find:
return (curr_run, outcable)
# If we're here, we didn't find it.
return (None, None)
    ####
    # Code for executing tasks in an MPI environment.
def enqueue_runnable_tasks(self, data_newly_available):
"""
Function that queues steps/outcables that are ready to run now that new data is available.
"""
for dataset in data_newly_available:
assert dataset.has_data() or self.find_dataset(dataset) is not None
# First, get anything that was waiting on this data to proceed.
taxiing_for_takeoff = []
for dataset in data_newly_available:
if dataset in self.tasks_waiting:
# Notify all tasks waiting on this dataset that it's now available.
# Trigger any tasks for which that was the last piece it was waiting on.
for task in self.tasks_waiting[dataset]:
self.waiting_for[task].remove(dataset)
if len(self.waiting_for[task]) == 0:
# Add this to the list of things that are ready to go.
taxiing_for_takeoff.append(task)
# Remove this entry from self.tasks_waiting.
self.tasks_waiting.pop(dataset)
self.queue_for_processing = self.queue_for_processing + taxiing_for_takeoff
def advance_pipeline(self, task_completed=None, run_to_advance=None, incables_completed=None,
steps_completed=None, outcables_completed=None):
"""
Proceed through a pipeline, seeing what can run now that a step or cable has just completed.
Note that if a sub-pipeline of the pipeline finishes, we report that the parent runstep
has finished, not the cables.
If task_completed is specified, that indicates that a new RunComponent has just finished
(i.e. by the fleet), so we attempt to advance the Pipeline.
If run_to_advance is specified, it means this is a recursive call, attempting to advance
a sub-Pipeline given the new stuff that has been finished so far (which is passed on
through the parameters incables_completed, steps_completed, and outcables_completed).
PRE:
at most one of run_to_advance and task_completed may not be None.
if task_completed is not None, it is finished and successful.
"""
assert (type(task_completed) in (RunStep,
RunSIC,
RunOutputCable) or
task_completed is None)
assert not (run_to_advance is not None and task_completed is not None)
incables_completed = incables_completed or []
steps_completed = steps_completed or []
outcables_completed = outcables_completed or []
run_to_resume = self.run
if run_to_advance:
assert run_to_advance.top_level_run == self.run
run_to_resume = run_to_advance
if task_completed is None and run_to_advance is None:
self.logger.debug('Starting run "%s"', self.run)
elif task_completed is not None:
self.logger.debug('Advancing run "%s" after completion of task %s (coordinates: %s)',
self.run,
task_completed,
task_completed.get_coordinates())
else: # run_to_advance is not None
self.logger.debug('Advancing sub-run "%s" (coordinates %s) of pipeline %s',
run_to_resume, run_to_resume.get_coordinates(), self.run)
if task_completed is None and not run_to_resume.has_started():
run_to_resume.start(save=True)
# Refresh the run plan, unless this is a recursive call that starts a new sub-Pipeline.
if not run_to_advance:
self.run_plan = RunPlan()
self.run_plan.load(self.run, self.inputs)
self.run_plan.find_consistent_execution()
pipeline_to_resume = run_to_resume.pipeline
if run_to_resume != self.run:
assert run_to_resume.top_level_run == self.run
sandbox_path = self.sandbox_path
if run_to_resume != self.run:
sandbox_path = (self.step_execute_info[(run_to_resume.parent_runstep.run,
run_to_resume.parent_runstep.pipelinestep)]
.step_run_dir)
# Update our lists of components completed.
step_nums_completed = []
if type(task_completed) == RunSIC:
assert task_completed.dest_runstep.pipelinestep.is_subpipeline()
incables_completed.append(task_completed)
elif type(task_completed) == RunStep:
steps_completed.append(task_completed)
elif type(task_completed) == RunOutputCable:
outcables_completed.append(task_completed)
elif task_completed is None and run_to_advance is None:
# This indicates that the only things accessible are the inputs.
step_nums_completed.append(0)
step_nums_completed += [x.step_num for x in steps_completed if x.run == run_to_resume]
# A tracker for whether everything is complete or not.
all_complete = True
# Go through steps in order, looking for input cables pointing at the task(s) that have completed.
# If task_completed is None, then we are starting the pipeline and we look at the pipeline inputs.
for step in pipeline_to_resume.steps.order_by("step_num"):
curr_RS = run_to_resume.runsteps.filter(pipelinestep=step).first()
assert curr_RS is not None
# If this is already running, we skip it, unless it's a sub-Pipeline.
if curr_RS.is_running():
if not step.is_subpipeline():
# This is a non-sub-run already in progress, so we leave it.
all_complete = False
continue
# At this point, we know this is a sub-Pipeline, and is possibly waiting
# for one of its input cables to finish.
if type(task_completed) == RunSIC:
feeder_RSICs = curr_RS.RSICs.filter(pk__in=[x.pk for x in incables_completed])
if not feeder_RSICs.exists():
# This isn't one of the RunSICs for this sub-Run.
all_complete = False
continue
else:
self.sub_pipeline_cable_tracker[curr_RS].difference_update(set(feeder_RSICs))
if len(self.sub_pipeline_cable_tracker[curr_RS]) != 0:
# Not all of the cables are done yet.
all_complete = False
continue
else:
# Look in the lists of tasks completed. Do any of them belong to this sub-run?
complete_subtask_exists = False
for task in itertools.chain(incables_completed, steps_completed, outcables_completed):
task_coords = task.get_coordinates()
curr_step_coords = curr_RS.get_coordinates()
if task_coords[0:len(curr_step_coords)] == curr_step_coords:
complete_subtask_exists = True
break
if not complete_subtask_exists:
continue
# Having reached here, we know that task_completed was either:
# - the last RunSIC the sub-Pipeline was waiting on, or
# - a task belonging to the sub-Run,
# so we can advance the sub-Run and update the lists of components
# completed.
incables_completed, steps_completed, outcables_completed = self.advance_pipeline(
run_to_advance=curr_RS.child_run,
incables_completed=incables_completed,
steps_completed=steps_completed,
outcables_completed=outcables_completed
)
curr_RS.refresh_from_db()
if curr_RS.child_run.is_cancelled():
curr_RS.cancel_running(save=True)
run_to_resume.cancel(save=True)
return incables_completed, steps_completed, outcables_completed
elif curr_RS.child_run.is_failed():
curr_RS.finish_failure(save=True)
run_to_resume.mark_failure(save=True)
return incables_completed, steps_completed, outcables_completed
elif curr_RS.child_run.is_successful():
curr_RS.finish_successfully(save=True)
else:
all_complete = False
# We've done all we can with this sub-Pipeline, so we move on to the next step.
continue
# Now, check that this step is still pending. If not, skip ahead.
if not curr_RS.is_pending():
continue
# If this step is not fed at all by any of the tasks that just completed,
# we skip it -- it can't have just become ready to go.
# Special case: this step has no inputs (for example, it's a random number generator).
# If so, we just go ahead.
fed_by_newly_completed = not step.cables_in.exists()
if step.cables_in.filter(source_step__in=step_nums_completed).exists():
fed_by_newly_completed = True
if not fed_by_newly_completed:
for cable in outcables_completed:
parent_runstep = cable.parent_run.parent_runstep
if parent_runstep is None or parent_runstep.run != run_to_resume:
continue
output_fed = parent_runstep.transformation.outputs.get(
dataset_idx=cable.pipelineoutputcable.output_idx
)
if step.cables_in.filter(source_step=parent_runstep.step_num, source=output_fed).exists():
fed_by_newly_completed = True
break
if not fed_by_newly_completed and run_to_resume.is_subrun:
# Check if this is fed by a completed incable (i.e. if this is part of a sub-Pipeline that is
# fed directly from the inputs).
pipeline_inputs_fed = []
for incable in incables_completed:
if run_to_resume.parent_runstep != incable.dest_runstep:
continue
pipeline_inputs_fed.append(incable.PSIC.dest)
are_any_used = step.cables_in.filter(source_step=0, source__in=pipeline_inputs_fed).exists()
if are_any_used:
fed_by_newly_completed = True
if not fed_by_newly_completed:
# This one certainly isn't getting completed now.
all_complete = False
continue
# Examine this step and see if all of the inputs are (at least symbolically) available.
step_inputs = []
# For each PSIC leading to this step, check if its required dataset is in the maps.
all_inputs_fed = True
for psic in step.cables_in.order_by("dest__dataset_idx"):
socket = psic.source.definite
run_to_query = run_to_resume
# If the PSIC comes from another step, the generator is the source pipeline step,
# or the output cable if it's a sub-pipeline.
if psic.source_step != 0:
generator = pipeline_to_resume.steps.get(step_num=psic.source_step)
if socket.transformation.is_pipeline():
run_to_query = run_to_resume.runsteps.get(pipelinestep=generator).child_run
generator = generator.transformation.pipeline.outcables.get(output_idx=socket.dataset_idx)
# Otherwise, the psic comes from step 0.
else:
# If this step is not a subpipeline, the dataset was uploaded.
generator = None
# If this step is a subpipeline, then the run we are interested in is the parent run.
# Get the run and cable that feeds this PSIC.
if run_to_resume.parent_runstep is not None:
run_to_query = run_to_resume.parent_runstep.run
cables_into_subpipeline = run_to_resume.parent_runstep.pipelinestep.cables_in
generator = cables_into_subpipeline.get(dest=psic.source)
if (run_to_query, generator, socket) in self.socket_map:
step_inputs.append(self.socket_map[(run_to_query, generator, socket)])
else:
all_inputs_fed = False
break
if not all_inputs_fed:
# This step cannot be run yet, so we move on.
all_complete = False
continue
# Start execution of this step.
curr_run_coords = run_to_resume.get_coordinates()
curr_run_plan = self.run_plan
for coord in curr_run_coords:
curr_run_plan = curr_run_plan.step_plans[coord-1].subrun_plan
assert curr_RS == curr_run_plan.step_plans[step.step_num-1].run_step
run_dir = os.path.join(sandbox_path, "step{}".format(step.step_num))
step_coords = curr_RS.get_coordinates()
| |
x), x, atanh(c * x)), x
)
def replacement6460(a, b, c, d, e, n, x):
    """Rubi rule 6460: rewrite via the substitution x -> acoth(c*x)
    (matching pattern defined elsewhere)."""
    prefactor = c * x * sqrt(S(1) - S(1) / (c ** S(2) * x ** S(2))) / sqrt(d + e * x ** S(2))
    substituted = Subst(Int((a + b * x) ** n / cosh(x), x), x, acoth(c * x))
    return -Dist(prefactor, substituted, x)
def replacement6461(a, b, c, d, e, n, x):
    """Rubi rule 6461: pull sqrt(1 - c^2 x^2)/sqrt(d + e x^2) out of the
    integral (atanh form; matching pattern defined elsewhere)."""
    scale = sqrt(-(c ** S(2)) * x ** S(2) + S(1)) / sqrt(d + e * x ** S(2))
    inner = Int(
        (a + b * atanh(c * x)) ** n / (x * sqrt(-(c ** S(2)) * x ** S(2) + S(1))), x
    )
    return Dist(scale, inner, x)
def replacement6462(a, b, c, d, e, n, x):
    """Rubi rule 6462: pull sqrt(1 - c^2 x^2)/sqrt(d + e x^2) out of the
    integral (acoth form; matching pattern defined elsewhere)."""
    scale = sqrt(-(c ** S(2)) * x ** S(2) + S(1)) / sqrt(d + e * x ** S(2))
    inner = Int(
        (a + b * acoth(c * x)) ** n / (x * sqrt(-(c ** S(2)) * x ** S(2) + S(1))), x
    )
    return Dist(scale, inner, x)
def replacement6463(a, b, c, d, e, n, x):
    """Rubi rule 6463: integration-by-parts style rewrite lowering the
    power n (atanh form; matching pattern defined elsewhere)."""
    recursive = Int((a + b * atanh(c * x)) ** (n + S(-1)) / (x * sqrt(d + e * x ** S(2))), x)
    boundary = Simp((a + b * atanh(c * x)) ** n * sqrt(d + e * x ** S(2)) / (d * x), x)
    return Dist(b * c * n, recursive, x) - boundary
def replacement6464(a, b, c, d, e, n, x):
    """Rubi rule 6464: integration-by-parts style rewrite lowering the
    power n (acoth form; matching pattern defined elsewhere)."""
    recursive = Int((a + b * acoth(c * x)) ** (n + S(-1)) / (x * sqrt(d + e * x ** S(2))), x)
    boundary = Simp((a + b * acoth(c * x)) ** n * sqrt(d + e * x ** S(2)) / (d * x), x)
    return Dist(b * c * n, recursive, x) - boundary
def replacement6465(a, b, c, d, e, m, n, x):
    """Rubi rule 6465: three-term recurrence raising m and lowering n
    (atanh form; matching pattern defined elsewhere)."""
    raise_m = Dist(
        c ** S(2) * (m + S(2)) / (m + S(1)),
        Int(x ** (m + S(2)) * (a + b * atanh(c * x)) ** n / sqrt(d + e * x ** S(2)), x),
        x,
    )
    lower_n = Dist(
        b * c * n / (m + S(1)),
        Int(
            x ** (m + S(1))
            * (a + b * atanh(c * x)) ** (n + S(-1))
            / sqrt(d + e * x ** S(2)),
            x,
        ),
        x,
    )
    boundary = Simp(
        x ** (m + S(1))
        * (a + b * atanh(c * x)) ** n
        * sqrt(d + e * x ** S(2))
        / (d * (m + S(1))),
        x,
    )
    return raise_m - lower_n + boundary
def replacement6466(a, b, c, d, e, m, n, x):
    """Rubi rule 6466: three-term recurrence raising m and lowering n
    (acoth form; matching pattern defined elsewhere)."""
    raise_m = Dist(
        c ** S(2) * (m + S(2)) / (m + S(1)),
        Int(x ** (m + S(2)) * (a + b * acoth(c * x)) ** n / sqrt(d + e * x ** S(2)), x),
        x,
    )
    lower_n = Dist(
        b * c * n / (m + S(1)),
        Int(
            x ** (m + S(1))
            * (a + b * acoth(c * x)) ** (n + S(-1))
            / sqrt(d + e * x ** S(2)),
            x,
        ),
        x,
    )
    boundary = Simp(
        x ** (m + S(1))
        * (a + b * acoth(c * x)) ** n
        * sqrt(d + e * x ** S(2))
        / (d * (m + S(1))),
        x,
    )
    return raise_m - lower_n + boundary
def replacement6467(a, b, c, d, e, m, n, p, x):
    """Rubi rule 6467: split x^m = x^(m-2)*(d + e x^2 - d)/e
    (atanh form; matching pattern defined elsewhere)."""
    raised_p = Int(
        x ** (m + S(-2)) * (a + b * atanh(c * x)) ** n * (d + e * x ** S(2)) ** (p + S(1)),
        x,
    )
    same_p = Int(
        x ** (m + S(-2)) * (a + b * atanh(c * x)) ** n * (d + e * x ** S(2)) ** p, x
    )
    return Dist(S(1) / e, raised_p, x) - Dist(d / e, same_p, x)
def replacement6468(a, b, c, d, e, m, n, p, x):
    """Rubi rule 6468: split x^m = x^(m-2)*(d + e x^2 - d)/e
    (acoth form; matching pattern defined elsewhere)."""
    raised_p = Int(
        x ** (m + S(-2)) * (a + b * acoth(c * x)) ** n * (d + e * x ** S(2)) ** (p + S(1)),
        x,
    )
    same_p = Int(
        x ** (m + S(-2)) * (a + b * acoth(c * x)) ** n * (d + e * x ** S(2)) ** p, x
    )
    return Dist(S(1) / e, raised_p, x) - Dist(d / e, same_p, x)
def replacement6469(a, b, c, d, e, m, n, p, x):
    """Rubi rule 6469: expand (d + e x^2)^p = ((d + e x^2)^(p+1) - e x^2 (d + e x^2)^p)/d
    (atanh form; matching pattern defined elsewhere)."""
    raised_p = Int(
        x ** m * (a + b * atanh(c * x)) ** n * (d + e * x ** S(2)) ** (p + S(1)), x
    )
    raised_m = Int(
        x ** (m + S(2)) * (a + b * atanh(c * x)) ** n * (d + e * x ** S(2)) ** p, x
    )
    return Dist(S(1) / d, raised_p, x) - Dist(e / d, raised_m, x)
def replacement6470(a, b, c, d, e, m, n, p, x):
    """Rubi rule 6470: expand (d + e x^2)^p = ((d + e x^2)^(p+1) - e x^2 (d + e x^2)^p)/d
    (acoth form; matching pattern defined elsewhere)."""
    raised_p = Int(
        x ** m * (a + b * acoth(c * x)) ** n * (d + e * x ** S(2)) ** (p + S(1)), x
    )
    raised_m = Int(
        x ** (m + S(2)) * (a + b * acoth(c * x)) ** n * (d + e * x ** S(2)) ** p, x
    )
    return Dist(S(1) / d, raised_p, x) - Dist(e / d, raised_m, x)
def replacement6471(a, b, c, d, e, m, n, p, x):
    """Rubi rule 6471: recurrence raising the power n
    (atanh form; matching pattern defined elsewhere)."""
    lower_m = Dist(
        m / (b * c * (n + S(1))),
        Int(
            x ** (m + S(-1))
            * (a + b * atanh(c * x)) ** (n + S(1))
            * (d + e * x ** S(2)) ** p,
            x,
        ),
        x,
    )
    raise_m = Dist(
        c * (m + S(2) * p + S(2)) / (b * (n + S(1))),
        Int(
            x ** (m + S(1))
            * (a + b * atanh(c * x)) ** (n + S(1))
            * (d + e * x ** S(2)) ** p,
            x,
        ),
        x,
    )
    boundary = Simp(
        x ** m
        * (a + b * atanh(c * x)) ** (n + S(1))
        * (d + e * x ** S(2)) ** (p + S(1))
        / (b * c * d * (n + S(1))),
        x,
    )
    return -lower_m + raise_m + boundary
def replacement6472(a, b, c, d, e, m, n, p, x):
    """RUBI rule (auto-generated): acoth counterpart of replacement6471 —
    recurrence stepping n up to n+1 for
    Int(x**m*(a + b*acoth(c*x))**n*(d + e*x**2)**p, x).
    """
    return (
        -Dist(
            m / (b * c * (n + S(1))),
            Int(
                x ** (m + S(-1))
                * (a + b * acoth(c * x)) ** (n + S(1))
                * (d + e * x ** S(2)) ** p,
                x,
            ),
            x,
        )
        + Dist(
            c * (m + S(2) * p + S(2)) / (b * (n + S(1))),
            Int(
                x ** (m + S(1))
                * (a + b * acoth(c * x)) ** (n + S(1))
                * (d + e * x ** S(2)) ** p,
                x,
            ),
            x,
        )
        + Simp(
            x ** m
            * (a + b * acoth(c * x)) ** (n + S(1))
            * (d + e * x ** S(2)) ** (p + S(1))
            / (b * c * d * (n + S(1))),
            x,
        )
    )
def replacement6473(a, b, c, d, e, m, n, p, x):
    """RUBI rule (auto-generated): substitution u = atanh(c*x) (i.e.
    x = tanh(u)/c), transforming the integral into
    Int((a + b*u)**n * sinh(u)**m * cosh(u)**(-m - 2*p - 2), u) scaled by
    c**(-m-1)*d**p.

    NOTE(review): presumably valid only when d + e*x**2 == d*(1 - c**2*x**2),
    i.e. e == -c**2*d — confirm against the rule's constraint set.
    """
    return Dist(
        c ** (-m + S(-1)) * d ** p,
        Subst(
            Int(
                (a + b * x) ** n * sinh(x) ** m * cosh(x) ** (-m - S(2) * p + S(-2)), x
            ),
            x,
            atanh(c * x),
        ),
        x,
    )
def replacement6474(a, b, c, d, e, m, n, p, x):
    """RUBI rule (auto-generated): normalizes (d + e*x**2)**p to
    (1 - c**2*x**2)**p by extracting the factor
    d**(p + 1/2) * sqrt(1 - c**2*x**2) / sqrt(d + e*x**2).

    NOTE(review): that factor is constant only when e == -c**2*d (so the two
    square roots are proportional); the matcher presumably enforces this —
    confirm before reuse.
    """
    return Dist(
        d ** (p + S(1) / 2)
        * sqrt(-(c ** S(2)) * x ** S(2) + S(1))
        / sqrt(d + e * x ** S(2)),
        Int(
            x ** m
            * (a + b * atanh(c * x)) ** n
            * (-(c ** S(2)) * x ** S(2) + S(1)) ** p,
            x,
        ),
        x,
    )
def replacement6475(a, b, c, d, e, m, n, p, x):
return -Dist(
c ** (-m + S(-1)) * (-d) ** p,
Subst(
Int(
(a | |
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_run_downstream_lineage" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `get_run_downstream_lineage`") # noqa: E501
# verify the required parameter 'entity' is set
if self.api_client.client_side_validation and ('entity' not in local_var_params or # noqa: E501
local_var_params['entity'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `entity` when calling `get_run_downstream_lineage`") # noqa: E501
# verify the required parameter 'uuid' is set
if self.api_client.client_side_validation and ('uuid' not in local_var_params or # noqa: E501
local_var_params['uuid'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `uuid` when calling `get_run_downstream_lineage`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'entity' in local_var_params:
path_params['entity'] = local_var_params['entity'] # noqa: E501
if 'uuid' in local_var_params:
path_params['uuid'] = local_var_params['uuid'] # noqa: E501
query_params = []
if 'offset' in local_var_params and local_var_params['offset'] is not None: # noqa: E501
query_params.append(('offset', local_var_params['offset'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'sort' in local_var_params and local_var_params['sort'] is not None: # noqa: E501
query_params.append(('sort', local_var_params['sort'])) # noqa: E501
if 'query' in local_var_params and local_var_params['query'] is not None: # noqa: E501
query_params.append(('query', local_var_params['query'])) # noqa: E501
if 'no_page' in local_var_params and local_var_params['no_page'] is not None: # noqa: E501
query_params.append(('no_page', local_var_params['no_page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/{owner}/{entity}/runs/{uuid}/lineage/downstream', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1ListRunEdgesResponse', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_run_events(self, namespace, owner, project, uuid, kind, **kwargs): # noqa: E501
"""Get run events # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_run_events(namespace, owner, project, uuid, kind, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: namespace (required)
:param str owner: Owner of the namespace (required)
:param str project: Project where the run will be assigned (required)
:param str uuid: Uuid identifier of the entity (required)
:param str kind: The artifact kind (required)
:param str names: Names query param.
:param str orient: Orient query param.
:param bool force: Force query param.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1EventsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_run_events_with_http_info(namespace, owner, project, uuid, kind, **kwargs) # noqa: E501
    def get_run_events_with_http_info(self, namespace, owner, project, uuid, kind, **kwargs):  # noqa: E501
        """Get run events  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_run_events_with_http_info(namespace, owner, project, uuid, kind, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str namespace: namespace (required)
        :param str owner: Owner of the namespace (required)
        :param str project: Project where the run will be assigned (required)
        :param str uuid: Uuid identifier of the entity (required)
        :param str kind: The artifact kind (required)
        :param str names: Names query param.
        :param str orient: Orient query param.
        :param bool force: Force query param.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(V1EventsResponse, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE: locals() is captured before any other local exists, so it holds
        # exactly the declared parameters plus `kwargs`; the validation below
        # depends on the parameter names staying in sync with `all_params`.
        local_var_params = locals()
        all_params = [
            'namespace',
            'owner',
            'project',
            'uuid',
            'kind',
            'names',
            'orient',
            'force'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout'
            ]
        )
        # Reject unknown keyword arguments, then flatten the accepted ones
        # into local_var_params so path/query extraction below is uniform.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_run_events" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'namespace' is set
        if self.api_client.client_side_validation and ('namespace' not in local_var_params or  # noqa: E501
                                                        local_var_params['namespace'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `namespace` when calling `get_run_events`")  # noqa: E501
        # verify the required parameter 'owner' is set
        if self.api_client.client_side_validation and ('owner' not in local_var_params or  # noqa: E501
                                                        local_var_params['owner'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `owner` when calling `get_run_events`")  # noqa: E501
        # verify the required parameter 'project' is set
        if self.api_client.client_side_validation and ('project' not in local_var_params or  # noqa: E501
                                                        local_var_params['project'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `project` when calling `get_run_events`")  # noqa: E501
        # verify the required parameter 'uuid' is set
        if self.api_client.client_side_validation and ('uuid' not in local_var_params or  # noqa: E501
                                                        local_var_params['uuid'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `uuid` when calling `get_run_events`")  # noqa: E501
        # verify the required parameter 'kind' is set
        if self.api_client.client_side_validation and ('kind' not in local_var_params or  # noqa: E501
                                                        local_var_params['kind'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `kind` when calling `get_run_events`")  # noqa: E501
        collection_formats = {}
        # Path parameters are substituted into the URL template below.
        path_params = {}
        if 'namespace' in local_var_params:
            path_params['namespace'] = local_var_params['namespace']  # noqa: E501
        if 'owner' in local_var_params:
            path_params['owner'] = local_var_params['owner']  # noqa: E501
        if 'project' in local_var_params:
            path_params['project'] = local_var_params['project']  # noqa: E501
        if 'uuid' in local_var_params:
            path_params['uuid'] = local_var_params['uuid']  # noqa: E501
        if 'kind' in local_var_params:
            path_params['kind'] = local_var_params['kind']  # noqa: E501
        # Optional query parameters are only sent when explicitly provided.
        query_params = []
        if 'names' in local_var_params and local_var_params['names'] is not None:  # noqa: E501
            query_params.append(('names', local_var_params['names']))  # noqa: E501
        if 'orient' in local_var_params and local_var_params['orient'] is not None:  # noqa: E501
            query_params.append(('orient', local_var_params['orient']))  # noqa: E501
        if 'force' in local_var_params and local_var_params['force'] is not None:  # noqa: E501
            query_params.append(('force', local_var_params['force']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['ApiKey']  # noqa: E501
        return self.api_client.call_api(
            '/streams/v1/{namespace}/{owner}/{project}/runs/{uuid}/events/{kind}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1EventsResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_run_logs(self, namespace, owner, project, uuid, **kwargs): # noqa: E501
"""Get run logs # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_run_logs(namespace, owner, project, uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: (required)
:param str owner: Owner of the namespace (required)
:param str project: Project where the run will be assigned (required)
:param str uuid: Uuid identifier of the entity (required)
:param datetime last_time: last time.
:param str last_file: last file.
:param bool force: Force query param.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Logs
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_run_logs_with_http_info(namespace, owner, project, uuid, **kwargs) # noqa: E501
def get_run_logs_with_http_info(self, namespace, owner, project, uuid, **kwargs): # noqa: E501
"""Get run logs # noqa: E501
This method makes a synchronous HTTP request by default. | |
<gh_stars>1-10
from ctypes import (Structure, POINTER, CFUNCTYPE, c_void_p, c_float,
c_int32, c_double, c_char, c_int16, c_int64)
from enum import IntEnum
# Corresponds to VstIntPtr in aeffect.h.
# NOTE(review): hard-coded to 64-bit hosts; on a 32-bit build VstIntPtr
# would be 32 bits wide — confirm before reusing on 32-bit platforms.
vst_int_ptr = c_int64
class AudioMasterOpcodes(IntEnum):
    """Host ("audioMaster") callback opcodes from the VST 2.4 SDK (aeffect.h /
    aeffectx.h).

    The integer values are the opcode indices a plug-in passes to the host
    callback.  Gaps in the numbering correspond to opcodes deprecated in
    VST 2.4 (DECLARE_VST_DEPRECATED in the SDK); they are kept as comments so
    this table lines up with the original headers.
    """

    # [index]: parameter index  [opt]: parameter value -- AudioEffect::setParameterAutomated
    audioMasterAutomate = 0
    # [return]: host VST version (e.g. 2400 for VST 2.4) -- AudioEffect::getMasterVersion
    audioMasterVersion = 1
    # [return]: current unique identifier on shell plug-in -- AudioEffect::getCurrentUniqueId
    audioMasterCurrentId = 2
    # no arguments -- AudioEffect::masterIdle
    audioMasterIdle = 3
    # 4-5: audioMasterPinConnected (deprecated in VST 2.4 r2) and a reserved slot
    # deprecated in VST 2.4
    audioMasterWantMidi = 6
    # [return]: VstTimeInfo* or null  [value]: request mask -- AudioEffectX::getTimeInfo
    audioMasterGetTime = 7
    # [ptr]: VstEvents* -- AudioEffectX::sendVstEventsToHost
    audioMasterProcessEvents = 8
    # 9-12: audioMasterSetTime / TempoAt / GetNumAutomatableParameters /
    # GetParameterQuantization (all deprecated in VST 2.4)
    # [return]: 1 if supported -- AudioEffectX::ioChanged
    audioMasterIOChanged = 13
    # 14: audioMasterNeedIdle (deprecated in VST 2.4)
    # [index]: new width  [value]: new height  [return]: 1 if supported -- AudioEffectX::sizeWindow
    audioMasterSizeWindow = 15
    # [return]: current sample rate -- AudioEffectX::updateSampleRate
    audioMasterGetSampleRate = 16
    # [return]: current block size -- AudioEffectX::updateBlockSize
    audioMasterGetBlockSize = 17
    # [return]: input latency in audio samples -- AudioEffectX::getInputLatency
    audioMasterGetInputLatency = 18
    # [return]: output latency in audio samples -- AudioEffectX::getOutputLatency
    audioMasterGetOutputLatency = 19
    # 20-22: audioMasterGetPreviousPlug / GetNextPlug / WillReplaceOrAccumulate
    # (all deprecated in VST 2.4)
    # [return]: current process level -- VstProcessLevels
    audioMasterGetCurrentProcessLevel = 23
    # [return]: current automation state -- VstAutomationStates
    audioMasterGetAutomationState = 24
    # [index]: numNewAudioFiles  [value]: numAudioFiles  [ptr]: VstAudioFile* -- AudioEffectX::offlineStart
    audioMasterOfflineStart = 25
    # [index]: bool readSource  [value]: VstOfflineOption*  [ptr]: VstOfflineTask* -- AudioEffectX::offlineRead
    audioMasterOfflineRead = 26
    # same arguments as audioMasterOfflineRead -- AudioEffectX::offlineRead
    audioMasterOfflineWrite = 27
    # AudioEffectX::offlineGetCurrentPass
    audioMasterOfflineGetCurrentPass = 28
    # AudioEffectX::offlineGetCurrentMetaPass
    audioMasterOfflineGetCurrentMetaPass = 29
    # 30-31: audioMasterSetOutputSampleRate / GetOutputSpeakerArrangement
    # (both deprecated in VST 2.4)
    # [ptr]: char buffer, limited to kVstMaxVendorStrLen -- AudioEffectX::getHostVendorString
    audioMasterGetVendorString = 32
    # [ptr]: char buffer, limited to kVstMaxProductStrLen -- AudioEffectX::getHostProductString
    audioMasterGetProductString = 33
    # [return]: vendor-specific version -- AudioEffectX::getHostVendorVersion
    audioMasterGetVendorVersion = 34
    # no definition, vendor specific handling -- AudioEffectX::hostVendorSpecific
    audioMasterVendorSpecific = 35
    # 36: audioMasterSetIcon (deprecated in VST 2.4)
    # [ptr]: "can do" string  [return]: 1 if supported
    audioMasterCanDo = 37
    # [return]: language code -- VstHostLanguage
    audioMasterGetLanguage = 38
    # 39-40: audioMasterOpenWindow / CloseWindow (deprecated in VST 2.4)
    # [return]: FSSpec on MAC, else char* -- AudioEffectX::getDirectory
    audioMasterGetDirectory = 41
    # no arguments
    audioMasterUpdateDisplay = 42
    # [index]: parameter index -- AudioEffectX::beginEdit
    audioMasterBeginEdit = 43
    # [index]: parameter index -- AudioEffectX::endEdit
    audioMasterEndEdit = 44
    # [ptr]: VstFileSelect*  [return]: 1 if supported -- AudioEffectX::openFileSelector
    audioMasterOpenFileSelector = 45
    # [ptr]: VstFileSelect* -- AudioEffectX::closeFileSelector
    audioMasterCloseFileSelector = 46
    # 47-49: audioMasterEditFile / GetChunkFile / GetInputSpeakerArrangement
    # (all deprecated in VST 2.4)
class AEffectOpcodes(IntEnum):
# no arguments @see AudioEffect::open
effOpen = 0
# no arguments @see AudioEffect::close
effClose = 1
# [value]: new program number @see AudioEffect::setProgram
effSetProgram = 2
# [return value]: current program number @see AudioEffect::getProgram
effGetProgram = 3
# [ptr]: char* with new program name, limited to #kVstMaxProgNameLen @see AudioEffect::setProgramName
effSetProgramName = 4
# [ptr]: char buffer for current program name, limited to #kVstMaxProgNameLen @see AudioEffect::getProgramName
effGetProgramName = 5
# [ptr]: char buffer for parameter label, limited to #kVstMaxParamStrLen @see AudioEffect::getParameterLabel
effGetParamLabel = 6
# [ptr]: char buffer for parameter display, limited to #kVstMaxParamStrLen @see AudioEffect::getParameterDisplay
effGetParamDisplay = 7
# [ptr]: char buffer for parameter name, limited to #kVstMaxParamStrLen @see AudioEffect::getParameterName
effGetParamName = 8
# \deprecated deprecated in VST 2.4
# DECLARE_VST_DEPRECATED (effGetVu)
# [opt]: new sample rate for audio processing @see AudioEffect::setSampleRate
effSetSampleRate = 10
# [value]: new maximum block size for audio processing @see AudioEffect::setBlockSize
effSetBlockSize = 11
# [value]: 0 means "turn off", 1 means "turn on" @see AudioEffect::suspend @see AudioEffect::resume
effMainsChanged = 12
# [ptr]: #ERect** receiving pointer to editor size @see ERect @see AEffEditor::getRect
effEditGetRect = 13
# [ptr]: system dependent Window pointer, e.g. HWND on Windows @see AEffEditor::open
effEditOpen = 14
# no arguments @see AEffEditor::close
effEditClose = 15
# \deprecated deprecated in VST 2.4
# DECLARE_VST_DEPRECATED (effEditDraw)
# deprecated deprecated in VST 2.4
# DECLARE_VST_DEPRECATED (effEditMouse)
# deprecated deprecated in VST 2.4
# DECLARE_VST_DEPRECATED (effEditKey)
# no arguments @see AEffEditor::idle
effEditIdle = 19
# deprecated deprecated in VST 2.4
# DECLARE_VST_DEPRECATED (effEditTop)
# deprecated deprecated in VST 2.4
# DECLARE_VST_DEPRECATED (effEditSleep)
# deprecated deprecated in VST 2.4
# DECLARE_VST_DEPRECATED (effIdentify)
# [ptr]: void** for chunk data address [index]: 0 for bank, 1 for program @see AudioEffect::getChunk
effGetChunk = 23
# [ptr]: chunk data [value]: byte size [index]: 0 for bank, 1 for program @see AudioEffect::setChunk
effSetChunk = 24
# [ptr]: #VstEvents* @see AudioEffectX::processEvents
effProcessEvents = 25
# [index]: parameter index [return value]: 1=true, 0=false @see AudioEffectX::canParameterBeAutomated
effCanBeAutomated = 26
# [index]: parameter index [ptr]: parameter string [return value]: true for success @see AudioEffectX::string2parameter
effString2Parameter = 27
# \deprecated deprecated in VST 2.4
# DECLARE_VST_DEPRECATED (effGetNumProgramCategories)
# [index]: program index [ptr]: buffer for program name, limited to #kVstMaxProgNameLen [return value]: true for success @see AudioEffectX::getProgramNameIndexed
effGetProgramNameIndexed = 29
# \deprecated deprecated in VST 2.4
# DECLARE_VST_DEPRECATED (effCopyProgram)
# \deprecated deprecated in VST 2.4
# DECLARE_VST_DEPRECATED (effConnectInput)
# \deprecated deprecated in VST 2.4
# DECLARE_VST_DEPRECATED (effConnectOutput)
# [index]: input index [ptr]: #VstPinProperties* [return value]: 1 if supported @see AudioEffectX::getInputProperties
effGetInputProperties = 33
# [index]: output index [ptr]: #VstPinProperties* [return value]: 1 if supported @see AudioEffectX::getOutputProperties
effGetOutputProperties = 34
# [return value]: category @see VstPlugCategory @see AudioEffectX::getPlugCategory
effGetPlugCategory = 35
# \deprecated deprecated in VST 2.4
# DECLARE_VST_DEPRECATED (effGetCurrentPosition)
# \deprecated deprecated in VST 2.4
# DECLARE_VST_DEPRECATED (effGetDestinationBuffer)
# [ptr]: #VstAudioFile array [value]: count [index]: start flag @see AudioEffectX::offlineNotify
effOfflineNotify = 38
# [ptr]: #VstOfflineTask array [value]: count @see AudioEffectX::offlinePrepare
effOfflinePrepare = 39
# [ptr]: #VstOfflineTask array [value]: count @see AudioEffectX::offlineRun
effOfflineRun = 40
# [ptr]: #VstVariableIo* @see AudioEffectX::processVariableIo
effProcessVarIo = 41
# [value]: input #VstSpeakerArrangement* [ptr]: output #VstSpeakerArrangement* @see AudioEffectX::setSpeakerArrangement
effSetSpeakerArrangement = 42
# \deprecated deprecated in VST 2.4
# DECLARE_VST_DEPRECATED (effSetBlockSizeAndSampleRate)
# [value]: 1 = bypass, 0 = no bypass @see AudioEffectX::setBypass
effSetBypass = 44
# [ptr]: buffer for effect name limited to #kVstMaxEffectNameLen @see AudioEffectX::getEffectName
effGetEffectName = 45
# \deprecated deprecated in VST 2.4
# DECLARE_VST_DEPRECATED (effGetErrorText)
# [ptr]: buffer for effect vendor string, limited to #kVstMaxVendorStrLen @see AudioEffectX::getVendorString
effGetVendorString = 47
# [ptr]: buffer for effect vendor string, limited to #kVstMaxProductStrLen @see AudioEffectX::getProductString
effGetProductString = 48
# [return value]: vendor-specific version @see AudioEffectX::getVendorVersion
effGetVendorVersion = 49
# no definition, vendor specific handling @see AudioEffectX::vendorSpecific
effVendorSpecific = 50
# [ptr]: "can do" string [return value]: 0: "don't know" -1: "no" 1: "yes" @see AudioEffectX::canDo
effCanDo = 51
# [return value]: tail size (for example the reverb time of a reverb plug-in); 0 is default (return 1 for 'no tail')
effGetTailSize = 52
# \deprecated deprecated in VST 2.4
# DECLARE_VST_DEPRECATED (effIdle)
# \deprecated deprecated in VST 2.4
# DECLARE_VST_DEPRECATED (effGetIcon)
# \deprecated deprecated in VST 2.4
# DECLARE_VST_DEPRECATED (effSetViewPosition)
# [index]: parameter index [ptr]: #VstParameterProperties* [return value]: 1 if supported @see AudioEffectX::getParameterProperties
effGetParameterProperties = 56
# \deprecated deprecated in VST 2.4
# DECLARE_VST_DEPRECATED (effKeysRequired)
# [return value]: | |
result = instance_dispatch_table(instance)->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pCallback);')
r_body.append(' if (VK_SUCCESS == result) {')
r_body.append(' layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);')
r_body.append(' result = layer_create_msg_callback(my_data->report_data, pCreateInfo, pAllocator, pCallback);')
r_body.append(' }')
r_body.append(' return result;')
r_body.append('}')
return "\n".join(r_body)
def _gen_destroy_msg_callback(self):
r_body = []
r_body.append('%s' % self.lineinfo.get())
r_body.append('VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback, const VkAllocationCallbacks *pAllocator)')
r_body.append('{')
# Switch to this code section for the new per-instance storage and debug callbacks
if self.layer_name in ['object_tracker', 'unique_objects']:
r_body.append(' VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(%s_instance_table_map, instance);' % self.layer_name )
else:
r_body.append(' VkLayerInstanceDispatchTable *pInstanceTable = instance_dispatch_table(instance);')
r_body.append(' pInstanceTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);')
r_body.append(' layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);')
r_body.append(' layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);')
r_body.append('}')
return "\n".join(r_body)
def _gen_debug_report_msg(self):
r_body = []
r_body.append('%s' % self.lineinfo.get())
r_body.append('VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg)')
r_body.append('{')
# Switch to this code section for the new per-instance storage and debug callbacks
if self.layer_name == 'object_tracker':
r_body.append(' VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(%s_instance_table_map, instance);' % self.layer_name )
else:
r_body.append(' VkLayerInstanceDispatchTable *pInstanceTable = instance_dispatch_table(instance);')
r_body.append(' pInstanceTable->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);')
r_body.append('}')
return "\n".join(r_body)
    def _gen_layer_get_global_extension_props(self, layer="object_tracker"):
        """Return C source implementing vkEnumerateInstanceExtensionProperties.

        Only object_tracker advertises an instance extension
        (VK_EXT_debug_report); all other layers report zero extensions.

        NOTE(review): the `layer` parameter is never used — the branches key
        off self.layer_name instead. Confirm whether callers rely on the
        parameter before removing it.
        """
        ggep_body = []
        # generated layers do not provide any global extensions
        ggep_body.append('%s' % self.lineinfo.get())
        ggep_body.append('')
        if self.layer_name == 'object_tracker':
            ggep_body.append('static const VkExtensionProperties instance_extensions[] = {')
            ggep_body.append('    {')
            ggep_body.append('        VK_EXT_DEBUG_REPORT_EXTENSION_NAME,')
            ggep_body.append('        VK_EXT_DEBUG_REPORT_SPEC_VERSION')
            ggep_body.append('    }')
            ggep_body.append('};')
        ggep_body.append('VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties* pProperties)')
        ggep_body.append('{')
        if self.layer_name == 'object_tracker':
            ggep_body.append('    return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);')
        else:
            ggep_body.append('    return util_GetExtensionProperties(0, NULL, pCount, pProperties);')
        ggep_body.append('}')
        return "\n".join(ggep_body)
def _gen_layer_get_global_layer_props(self, layer="object_tracker"):
ggep_body = []
layer_name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', layer)
layer_name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', layer_name).lower()
ggep_body.append('%s' % self.lineinfo.get())
ggep_body.append('static const VkLayerProperties globalLayerProps[] = {')
ggep_body.append(' {')
if self.layer_name in ['unique_objects']:
ggep_body.append(' "VK_LAYER_GOOGLE_%s",' % layer)
ggep_body.append(' VK_LAYER_API_VERSION, // specVersion')
ggep_body.append(' 1, // implementationVersion')
ggep_body.append(' "Google Validation Layer"')
else:
ggep_body.append(' "VK_LAYER_LUNARG_%s",' % layer)
ggep_body.append(' VK_LAYER_API_VERSION, // specVersion')
ggep_body.append(' 1, // implementationVersion')
ggep_body.append(' "LunarG Validation Layer"')
ggep_body.append(' }')
ggep_body.append('};')
ggep_body.append('')
ggep_body.append('%s' % self.lineinfo.get())
ggep_body.append('')
ggep_body.append('VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties* pProperties)')
ggep_body.append('{')
ggep_body.append(' return util_GetLayerProperties(ARRAY_SIZE(globalLayerProps), globalLayerProps, pCount, pProperties);')
ggep_body.append('}')
return "\n".join(ggep_body)
def _gen_layer_get_physical_device_layer_props(self, layer="object_tracker"):
gpdlp_body = []
gpdlp_body.append('%s' % self.lineinfo.get())
gpdlp_body.append('static const VkLayerProperties deviceLayerProps[] = {')
gpdlp_body.append(' {')
if self.layer_name in ['unique_objects']:
gpdlp_body.append(' "VK_LAYER_GOOGLE_%s",' % layer)
gpdlp_body.append(' VK_LAYER_API_VERSION, // specVersion')
gpdlp_body.append(' 1, // implementationVersion')
gpdlp_body.append(' "Google Validation Layer"')
else:
gpdlp_body.append(' "VK_LAYER_LUNARG_%s",' % layer)
gpdlp_body.append(' VK_LAYER_API_VERSION, // specVersion')
gpdlp_body.append(' 1, // implementationVersion')
gpdlp_body.append(' "LunarG Validation Layer"')
gpdlp_body.append(' }')
gpdlp_body.append('};')
gpdlp_body.append('VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties* pProperties)')
gpdlp_body.append('{')
gpdlp_body.append(' return util_GetLayerProperties(ARRAY_SIZE(deviceLayerProps), deviceLayerProps, pCount, pProperties);')
gpdlp_body.append('}')
gpdlp_body.append('')
return "\n".join(gpdlp_body)
    def _generate_dispatch_entrypoints(self, qual=""):
        """Emit the layer's intercepted entry points plus the two
        layer_intercept_proc / layer_intercept_instance_proc name-lookup
        helpers used by GetProcAddr.

        :param qual: optional C qualifier (e.g. "static") prefixed to
                     generated functions by generate_intercept.
        """
        if qual:
            qual += " "
        funcs = []
        intercepted = []
        for proto in self.protos:
            # GetDeviceProcAddr / GetInstanceProcAddr are emitted elsewhere.
            if proto.name == "GetDeviceProcAddr" or proto.name == "GetInstanceProcAddr":
                continue
            else:
                intercept = self.generate_intercept(proto, qual)
                if intercept is None:
                    # fill in default intercept for certain entrypoints
                    # NOTE(review): these call _gen_layer_dbg_create_msg_callback /
                    # _gen_layer_dbg_destroy_msg_callback, while the definitions
                    # visible nearby (and used by _generate_extensions) are named
                    # _gen_create_msg_callback / _gen_destroy_msg_callback —
                    # confirm the _layer_dbg_ variants exist elsewhere in this
                    # file, otherwise these branches raise AttributeError.
                    if 'CreateDebugReportCallbackEXT' == proto.name:
                        intercept = self._gen_layer_dbg_create_msg_callback()
                    elif 'DestroyDebugReportCallbackEXT' == proto.name:
                        intercept = self._gen_layer_dbg_destroy_msg_callback()
                    elif 'DebugReportMessageEXT' == proto.name:
                        intercept = self._gen_debug_report_msg()
                    elif 'CreateDevice' == proto.name:
                        funcs.append('/* CreateDevice HERE */')
                    elif 'EnumerateInstanceExtensionProperties' == proto.name:
                        intercept = self._gen_layer_get_global_extension_props(self.layer_name)
                    elif 'EnumerateInstanceLayerProperties' == proto.name:
                        intercept = self._gen_layer_get_global_layer_props(self.layer_name)
                    elif 'EnumerateDeviceLayerProperties' == proto.name:
                        intercept = self._gen_layer_get_physical_device_layer_props(self.layer_name)
                if intercept is not None:
                    funcs.append(intercept)
                    # KHR entry points get generated code but are not routed
                    # through the name-lookup helpers below.
                    if not "KHR" in proto.name:
                        intercepted.append(proto)
        prefix="vk"
        lookups = []
        for proto in intercepted:
            lookups.append("if (!strcmp(name, \"%s\"))" % proto.name)
            lookups.append("    return (PFN_vkVoidFunction) %s%s;" %
                (prefix, proto.name))
        # add customized layer_intercept_proc
        body = []
        body.append('%s' % self.lineinfo.get())
        body.append("static inline PFN_vkVoidFunction layer_intercept_proc(const char *name)")
        body.append("{")
        body.append(generate_get_proc_addr_check("name"))
        body.append("")
        # skip the "vk" prefix before comparing against unprefixed proto names
        body.append("    name += 2;")
        body.append("    %s" % "\n    ".join(lookups))
        body.append("")
        body.append("    return NULL;")
        body.append("}")
        # add layer_intercept_instance_proc
        lookups = []
        for proto in self.protos:
            if not proto_is_global(proto):
                continue
            if not proto in intercepted:
                continue
            # Create{Instance,Device} are resolved explicitly in GetProcAddr.
            if proto.name == "CreateInstance":
                continue
            if proto.name == "CreateDevice":
                continue
            lookups.append("if (!strcmp(name, \"%s\"))" % proto.name)
            lookups.append("    return (PFN_vkVoidFunction) %s%s;" % (prefix, proto.name))
        body.append("static inline PFN_vkVoidFunction layer_intercept_instance_proc(const char *name)")
        body.append("{")
        body.append(generate_get_proc_addr_check("name"))
        body.append("")
        body.append("    name += 2;")
        body.append("    %s" % "\n    ".join(lookups))
        body.append("")
        body.append("    return NULL;")
        body.append("}")
        funcs.append("\n".join(body))
        return "\n\n".join(funcs)
def _generate_extensions(self):
exts = []
exts.append('%s' % self.lineinfo.get())
exts.append(self._gen_create_msg_callback())
exts.append(self._gen_destroy_msg_callback())
exts.append(self._gen_debug_report_msg())
return "\n".join(exts)
def _generate_layer_gpa_function(self, extensions=[], instance_extensions=[]):
func_body = []
#
# New style of GPA Functions for the new layer_data/layer_logging changes
#
if self.layer_name in ['object_tracker', 'unique_objects']:
func_body.append("VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice device, const char* funcName)\n"
"{\n"
" PFN_vkVoidFunction addr;\n"
" if (!strcmp(\"vkGetDeviceProcAddr\", funcName)) {\n"
" return (PFN_vkVoidFunction) vkGetDeviceProcAddr;\n"
" }\n\n"
" addr = layer_intercept_proc(funcName);\n"
" if (addr)\n"
" return addr;\n"
" if (device == VK_NULL_HANDLE) {\n"
" return NULL;\n"
" }\n")
if 0 != len(extensions):
func_body.append('%s' % self.lineinfo.get())
func_body.append(' layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);')
for (ext_enable, ext_list) in extensions:
extra_space = ""
if 0 != len(ext_enable):
func_body.append(' if (my_device_data->%s) {' % ext_enable)
extra_space = " "
for ext_name in ext_list:
func_body.append(' %sif (!strcmp("%s", funcName))\n'
' %sreturn reinterpret_cast<PFN_vkVoidFunction>(%s);' % (extra_space, ext_name, extra_space, ext_name))
if 0 != len(ext_enable):
func_body.append(' }\n')
func_body.append("\n if (get_dispatch_table(%s_device_table_map, device)->GetDeviceProcAddr == NULL)\n"
" return NULL;\n"
" return get_dispatch_table(%s_device_table_map, device)->GetDeviceProcAddr(device, funcName);\n"
"}\n" % (self.layer_name, self.layer_name))
func_body.append("VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char* funcName)\n"
"{\n"
" PFN_vkVoidFunction addr;\n"
" if (!strcmp(funcName, \"vkGetInstanceProcAddr\"))\n"
" return (PFN_vkVoidFunction) vkGetInstanceProcAddr;\n"
" if (!strcmp(funcName, \"vkCreateInstance\"))\n"
" return (PFN_vkVoidFunction) vkCreateInstance;\n"
" if (!strcmp(funcName, \"vkCreateDevice\"))\n"
" return (PFN_vkVoidFunction) vkCreateDevice;\n"
" addr = layer_intercept_instance_proc(funcName);\n"
" if (addr) {\n"
" return addr;"
" }\n"
" if (instance == VK_NULL_HANDLE) {\n"
" return NULL;\n"
" }\n"
)
table_declared = False
if 0 != len(instance_extensions):
for (ext_enable, ext_list) in instance_extensions:
extra_space = ""
if 0 != len(ext_enable):
if ext_enable == 'msg_callback_get_proc_addr':
func_body.append(" layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);\n"
" addr = debug_report_get_instance_proc_addr(my_data->report_data, funcName);\n"
" if (addr) {\n"
" return addr;\n"
" }\n")
else:
if table_declared == False:
func_body.append(" VkLayerInstanceDispatchTable* pTable = get_dispatch_table(%s_instance_table_map, instance);" % self.layer_name)
table_declared = True
func_body.append(' if (instanceExtMap.size() != 0 && instanceExtMap[pTable].%s)' % ext_enable)
func_body.append(' {')
extra_space = " "
for ext_name in ext_list:
if wsi_name(ext_name):
func_body.append('%s' % wsi_ifdef(ext_name))
func_body.append(' %sif (!strcmp("%s", funcName))\n'
' return reinterpret_cast<PFN_vkVoidFunction>(%s);' % (extra_space, ext_name, ext_name))
if wsi_name(ext_name):
func_body.append('%s' % wsi_endif(ext_name))
if 0 != len(ext_enable):
func_body.append(' }\n')
func_body.append(" if (get_dispatch_table(%s_instance_table_map, instance)->GetInstanceProcAddr == NULL) {\n"
" return NULL;\n"
" }\n"
" return get_dispatch_table(%s_instance_table_map, instance)->GetInstanceProcAddr(instance, funcName);\n"
"}\n" % (self.layer_name, self.layer_name))
return "\n".join(func_body)
else:
func_body.append('%s' % self.lineinfo.get())
func_body.append("VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice device, const char* funcName)\n"
"{\n"
" PFN_vkVoidFunction addr;\n")
func_body.append("\n"
" loader_platform_thread_once(&initOnce, init%s);\n\n"
" if (!strcmp(\"vkGetDeviceProcAddr\", funcName)) {\n"
" return (PFN_vkVoidFunction) vkGetDeviceProcAddr;\n"
" }\n\n"
" addr = layer_intercept_proc(funcName);\n"
" if (addr)\n"
" return addr;" % self.layer_name)
func_body.append(" if (device == VK_NULL_HANDLE) {\n"
" return NULL;\n"
" }\n")
func_body.append('')
func_body.append(' VkLayerDispatchTable *pDisp = device_dispatch_table(device);')
if 0 != len(extensions):
extra_space = ""
for (ext_enable, ext_list) in extensions:
if 0 != len(ext_enable):
func_body.append(' if (deviceExtMap.size() != 0 && deviceExtMap[pDisp].%s)' % ext_enable)
func_body.append(' {')
extra_space = " "
for ext_name in ext_list:
func_body.append(' %sif (!strcmp("%s", funcName))\n'
' return reinterpret_cast<PFN_vkVoidFunction>(%s);' % (extra_space, ext_name, ext_name))
if 0 != len(ext_enable):
func_body.append(' }')
func_body.append('%s' % self.lineinfo.get())
func_body.append(" {\n"
" if (pDisp->GetDeviceProcAddr == NULL)\n"
" return NULL;\n"
" return pDisp->GetDeviceProcAddr(device, funcName);\n"
" }\n"
"}\n")
func_body.append('%s' % self.lineinfo.get())
func_body.append("VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char* funcName)\n"
"{\n"
" PFN_vkVoidFunction addr;\n"
" if (!strcmp(funcName, \"vkGetInstanceProcAddr\"))\n"
" return (PFN_vkVoidFunction) vkGetInstanceProcAddr;\n"
" if (!strcmp(funcName, \"vkCreateInstance\"))\n"
" return (PFN_vkVoidFunction) vkCreateInstance;\n"
" if (!strcmp(funcName, \"vkCreateDevice\"))\n"
" return (PFN_vkVoidFunction) vkCreateDevice;\n"
)
func_body.append(
" loader_platform_thread_once(&initOnce, init%s);\n\n"
" addr = layer_intercept_instance_proc(funcName);\n"
" if (addr)\n"
" return addr;" % self.layer_name)
func_body.append(" if (instance == VK_NULL_HANDLE) {\n"
" return NULL;\n"
" }\n")
func_body.append("")
func_body.append(" VkLayerInstanceDispatchTable* pTable = instance_dispatch_table(instance);\n")
if 0 != len(instance_extensions):
extra_space = ""
for (ext_enable, ext_list) in instance_extensions:
if 0 != len(ext_enable):
if ext_enable == 'msg_callback_get_proc_addr':
func_body.append(" layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);\n"
" addr = debug_report_get_instance_proc_addr(my_data->report_data, funcName);\n"
" if (addr) {\n"
" return addr;\n"
" }\n")
else:
func_body.append(' if (instanceExtMap.size() != 0 && instanceExtMap[pTable].%s)' % ext_enable)
func_body.append(' {')
extra_space = " "
for ext_name in ext_list:
if wsi_name(ext_name):
func_body.append('%s' % wsi_ifdef(ext_name))
func_body.append(' %sif (!strcmp("%s", funcName))\n'
' return reinterpret_cast<PFN_vkVoidFunction>(%s);' % (extra_space, ext_name, ext_name))
if wsi_name(ext_name):
func_body.append('%s' % wsi_endif(ext_name))
if | |
("", X, 1, "GL_TRANSFORM_FEEDBACK_VARYINGS"), # 0x8C83
("", I, 1, "GL_TRANSFORM_FEEDBACK_BUFFER_START"), # 0x8C84
("", I, 1, "GL_TRANSFORM_FEEDBACK_BUFFER_SIZE"), # 0x8C85
("", X, 1, "GL_TRANSFORM_FEEDBACK_RECORD_NV"), # 0x8C86
("", X, 1, "GL_PRIMITIVES_GENERATED"), # 0x8C87
("", X, 1, "GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN"), # 0x8C88
("glGet", B, 1, "GL_RASTERIZER_DISCARD"), # 0x8C89
("", X, 1, "GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS"), # 0x8C8A
("", X, 1, "GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS"), # 0x8C8B
("", X, 1, "GL_INTERLEAVED_ATTRIBS"), # 0x8C8C
("", X, 1, "GL_SEPARATE_ATTRIBS"), # 0x8C8D
("", X, 1, "GL_TRANSFORM_FEEDBACK_BUFFER"), # 0x8C8E
("glGet", I, 1, "GL_TRANSFORM_FEEDBACK_BUFFER_BINDING"), # 0x8C8F
("", X, 1, "GL_ATC_RGB_AMD"), # 0x8C92
("", X, 1, "GL_ATC_RGBA_EXPLICIT_ALPHA_AMD"), # 0x8C93
("glGet", E, 1, "GL_POINT_SPRITE_COORD_ORIGIN"), # 0x8CA0
("", X, 1, "GL_LOWER_LEFT"), # 0x8CA1
("", X, 1, "GL_UPPER_LEFT"), # 0x8CA2
("", X, 1, "GL_STENCIL_BACK_REF"), # 0x8CA3
("", X, 1, "GL_STENCIL_BACK_VALUE_MASK"), # 0x8CA4
("", X, 1, "GL_STENCIL_BACK_WRITEMASK"), # 0x8CA5
("glGet", I, 1, "GL_DRAW_FRAMEBUFFER_BINDING"), # 0x8CA6
("glGet", I, 1, "GL_RENDERBUFFER_BINDING"), # 0x8CA7
("", I, 1, "GL_READ_FRAMEBUFFER"), # 0x8CA8
("", I, 1, "GL_DRAW_FRAMEBUFFER"), # 0x8CA9
("glGet", I, 1, "GL_READ_FRAMEBUFFER_BINDING"), # 0x8CAA
("glGetRenderbufferParameter", I, 1, "GL_RENDERBUFFER_SAMPLES"), # 0x8CAB
("", X, 1, "GL_DEPTH_COMPONENT32F"), # 0x8CAC
("", X, 1, "GL_DEPTH32F_STENCIL8"), # 0x8CAD
("glGetFramebufferAttachmentParameter", E, 1, "GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE"), # 0x8CD0
("glGetFramebufferAttachmentParameter", I, 1, "GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME"), # 0x8CD1
("glGetFramebufferAttachmentParameter", I, 1, "GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL"), # 0x8CD2
("glGetFramebufferAttachmentParameter", E, 1, "GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE"), # 0x8CD3
("glGetFramebufferAttachmentParameter", I, 1, "GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER"), # 0x8CD4
("", X, 1, "GL_FRAMEBUFFER_COMPLETE"), # 0x8CD5
("", X, 1, "GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT"), # 0x8CD6
("", X, 1, "GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT"), # 0x8CD7
("", X, 1, "GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT"), # 0x8CD9
("", X, 1, "GL_FRAMEBUFFER_INCOMPLETE_FORMATS_EXT"), # 0x8CDA
("", X, 1, "GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER"), # 0x8CDB
("", X, 1, "GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER"), # 0x8CDC
("", X, 1, "GL_FRAMEBUFFER_UNSUPPORTED"), # 0x8CDD
("glGet", I, 1, "GL_MAX_COLOR_ATTACHMENTS"), # 0x8CDF
("", X, 1, "GL_COLOR_ATTACHMENT0"), # 0x8CE0
("", X, 1, "GL_COLOR_ATTACHMENT1"), # 0x8CE1
("", X, 1, "GL_COLOR_ATTACHMENT2"), # 0x8CE2
("", X, 1, "GL_COLOR_ATTACHMENT3"), # 0x8CE3
("", X, 1, "GL_COLOR_ATTACHMENT4"), # 0x8CE4
("", X, 1, "GL_COLOR_ATTACHMENT5"), # 0x8CE5
("", X, 1, "GL_COLOR_ATTACHMENT6"), # 0x8CE6
("", X, 1, "GL_COLOR_ATTACHMENT7"), # 0x8CE7
("", X, 1, "GL_COLOR_ATTACHMENT8"), # 0x8CE8
("", X, 1, "GL_COLOR_ATTACHMENT9"), # 0x8CE9
("", X, 1, "GL_COLOR_ATTACHMENT10"), # 0x8CEA
("", X, 1, "GL_COLOR_ATTACHMENT11"), # 0x8CEB
("", X, 1, "GL_COLOR_ATTACHMENT12"), # 0x8CEC
("", X, 1, "GL_COLOR_ATTACHMENT13"), # 0x8CED
("", X, 1, "GL_COLOR_ATTACHMENT14"), # 0x8CEE
("", X, 1, "GL_COLOR_ATTACHMENT15"), # 0x8CEF
("", X, 1, "GL_DEPTH_ATTACHMENT"), # 0x8D00
("", X, 1, "GL_STENCIL_ATTACHMENT"), # 0x8D20
("", X, 1, "GL_FRAMEBUFFER"), # 0x8D40
("", X, 1, "GL_RENDERBUFFER"), # 0x8D41
("glGetRenderbufferParameter", I, 1, "GL_RENDERBUFFER_WIDTH"), # 0x8D42
("glGetRenderbufferParameter", I, 1, "GL_RENDERBUFFER_HEIGHT"), # 0x8D43
("glGetRenderbufferParameter", E, 1, "GL_RENDERBUFFER_INTERNAL_FORMAT"), # 0x8D44
("", X, 1, "GL_STENCIL_INDEX1"), # 0x8D46
("", X, 1, "GL_STENCIL_INDEX4"), # 0x8D47
("", X, 1, "GL_STENCIL_INDEX8"), # 0x8D48
("", X, 1, "GL_STENCIL_INDEX16"), # 0x8D49
("glGetRenderbufferParameter", I, 1, "GL_RENDERBUFFER_RED_SIZE"), # 0x8D50
("glGetRenderbufferParameter", I, 1, "GL_RENDERBUFFER_GREEN_SIZE"), # 0x8D51
("glGetRenderbufferParameter", I, 1, "GL_RENDERBUFFER_BLUE_SIZE"), # 0x8D52
("glGetRenderbufferParameter", I, 1, "GL_RENDERBUFFER_ALPHA_SIZE"), # 0x8D53
("glGetRenderbufferParameter", I, 1, "GL_RENDERBUFFER_DEPTH_SIZE"), # 0x8D54
("glGetRenderbufferParameter", I, 1, "GL_RENDERBUFFER_STENCIL_SIZE"), # 0x8D55
("", X, 1, "GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE"), # 0x8D56
("glGet", I, 1, "GL_MAX_SAMPLES"), # 0x8D57
("", X, 1, "GL_TEXTURE_GEN_STR_OES"), # 0x8D60
("", X, 1, "GL_HALF_FLOAT_OES"), # 0x8D61
("", X, 1, "GL_RGB565_OES"), # 0x8D62
("", X, 1, "GL_ETC1_RGB8_OES"), # 0x8D64
("", X, 1, "GL_TEXTURE_EXTERNAL_OES"), # 0x8D65
("", X, 1, "GL_SAMPLER_EXTERNAL_OES"), # 0x8D66
("", X, 1, "GL_TEXTURE_BINDING_EXTERNAL_OES"), # 0x8D67
("", X, 1, "GL_REQUIRED_TEXTURE_IMAGE_UNITS_OES"), # 0x8D68
("", B, 1, "GL_PRIMITIVE_RESTART_FIXED_INDEX"), # 0x8D69
("", X, 1, "GL_ANY_SAMPLES_PASSED_CONSERVATIVE"), # 0x8D6A
("glGet", I, 1, "GL_MAX_ELEMENT_INDEX"), # 0x8D6B
("", X, 1, "GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SAMPLES_EXT"), # 0x8D6C
("", X, 1, "GL_RGBA32UI"), # 0x8D70
("", X, 1, "GL_RGB32UI"), # 0x8D71
("", X, 1, "GL_ALPHA32UI_EXT"), # 0x8D72
("", X, 1, "GL_INTENSITY32UI_EXT"), # 0x8D73
("", X, 1, "GL_LUMINANCE32UI_EXT"), # 0x8D74
("", X, 1, "GL_LUMINANCE_ALPHA32UI_EXT"), # 0x8D75
("", X, 1, "GL_RGBA16UI"), # 0x8D76
("", X, 1, "GL_RGB16UI"), # 0x8D77
("", X, 1, "GL_ALPHA16UI_EXT"), # 0x8D78
("", X, 1, "GL_INTENSITY16UI_EXT"), # 0x8D79
("", X, 1, "GL_LUMINANCE16UI_EXT"), # 0x8D7A
("", X, 1, "GL_LUMINANCE_ALPHA16UI_EXT"), # 0x8D7B
("", X, 1, "GL_RGBA8UI"), # 0x8D7C
("", X, 1, "GL_RGB8UI"), # 0x8D7D
("", X, 1, "GL_ALPHA8UI_EXT"), # 0x8D7E
("", X, 1, "GL_INTENSITY8UI_EXT"), # 0x8D7F
("", X, 1, "GL_LUMINANCE8UI_EXT"), # 0x8D80
("", X, 1, "GL_LUMINANCE_ALPHA8UI_EXT"), # 0x8D81
("", X, 1, "GL_RGBA32I"), # 0x8D82
("", X, 1, "GL_RGB32I"), # 0x8D83
("", X, 1, "GL_ALPHA32I_EXT"), # 0x8D84
("", X, 1, "GL_INTENSITY32I_EXT"), # 0x8D85
("", X, 1, "GL_LUMINANCE32I_EXT"), # 0x8D86
("", X, 1, "GL_LUMINANCE_ALPHA32I_EXT"), # 0x8D87
("", X, 1, "GL_RGBA16I"), # 0x8D88
("", X, 1, "GL_RGB16I"), # 0x8D89
("", X, 1, "GL_ALPHA16I_EXT"), # 0x8D8A
("", X, 1, "GL_INTENSITY16I_EXT"), # 0x8D8B
("", X, 1, "GL_LUMINANCE16I_EXT"), # 0x8D8C
("", X, 1, "GL_LUMINANCE_ALPHA16I_EXT"), # 0x8D8D
("", X, 1, "GL_RGBA8I"), # 0x8D8E
("", X, 1, "GL_RGB8I"), # 0x8D8F
("", X, 1, "GL_ALPHA8I_EXT"), # 0x8D90
("", X, 1, "GL_INTENSITY8I_EXT"), # 0x8D91
("", X, 1, "GL_LUMINANCE8I_EXT"), # 0x8D92
("", X, 1, "GL_LUMINANCE_ALPHA8I_EXT"), # 0x8D93
("", X, 1, "GL_RED_INTEGER"), # 0x8D94
("", X, 1, "GL_GREEN_INTEGER"), # 0x8D95
("", X, 1, "GL_BLUE_INTEGER"), # 0x8D96
("", X, 1, "GL_ALPHA_INTEGER"), # 0x8D97
("", X, 1, "GL_RGB_INTEGER"), # 0x8D98
("", X, 1, "GL_RGBA_INTEGER"), # 0x8D99
("", X, 1, "GL_BGR_INTEGER"), # 0x8D9A
("", X, 1, "GL_BGRA_INTEGER"), # 0x8D9B
("", X, 1, "GL_LUMINANCE_INTEGER_EXT"), # 0x8D9C
("", X, 1, "GL_LUMINANCE_ALPHA_INTEGER_EXT"), # 0x8D9D
("glGet", B, 1, "GL_RGBA_INTEGER_MODE_EXT"), # 0x8D9E
("", X, 1, "GL_INT_2_10_10_10_REV"), # 0x8D9F
("", X, 1, "GL_MAX_PROGRAM_PARAMETER_BUFFER_BINDINGS_NV"), # 0x8DA0
("", X, 1, "GL_MAX_PROGRAM_PARAMETER_BUFFER_SIZE_NV"), # 0x8DA1
("", X, 1, "GL_VERTEX_PROGRAM_PARAMETER_BUFFER_NV"), # 0x8DA2
("", X, 1, "GL_GEOMETRY_PROGRAM_PARAMETER_BUFFER_NV"), # 0x8DA3
("", X, 1, "GL_FRAGMENT_PROGRAM_PARAMETER_BUFFER_NV"), # 0x8DA4
("", X, 1, "GL_MAX_PROGRAM_GENERIC_ATTRIBS_NV"), # 0x8DA5
("", X, 1, "GL_MAX_PROGRAM_GENERIC_RESULTS_NV"), # 0x8DA6
("glGetFramebufferAttachmentParameter", B, 1, "GL_FRAMEBUFFER_ATTACHMENT_LAYERED"), # 0x8DA7
("", X, 1, "GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS"), # 0x8DA8
("", X, 1, "GL_FRAMEBUFFER_INCOMPLETE_LAYER_COUNT_ARB"), # 0x8DA9
("", X, 1, "GL_LAYER_NV"), # 0x8DAA
("", X, 1, "GL_DEPTH_COMPONENT32F_NV"), # 0x8DAB
("", X, 1, "GL_DEPTH32F_STENCIL8_NV"), # 0x8DAC
("", X, 1, "GL_FLOAT_32_UNSIGNED_INT_24_8_REV"), # 0x8DAD
("", X, 1, "GL_SHADER_INCLUDE_ARB"), # 0x8DAE
("", X, 1, "GL_DEPTH_BUFFER_FLOAT_MODE_NV"), # 0x8DAF
("glGet", B, 1, "GL_FRAMEBUFFER_SRGB"), # 0x8DB9
("glGet", B, 1, "GL_FRAMEBUFFER_SRGB_CAPABLE_EXT"), # 0x8DBA
("", X, 1, "GL_COMPRESSED_RED_RGTC1"), # 0x8DBB
("", X, 1, "GL_COMPRESSED_SIGNED_RED_RGTC1"), # 0x8DBC
("", X, 1, "GL_COMPRESSED_RG_RGTC2"), # 0x8DBD
("", X, 1, "GL_COMPRESSED_SIGNED_RG_RGTC2"), # 0x8DBE
("", X, 1, "GL_SAMPLER_1D_ARRAY"), # 0x8DC0
("", X, 1, "GL_SAMPLER_2D_ARRAY"), # 0x8DC1
("", X, 1, "GL_SAMPLER_BUFFER"), # 0x8DC2
("", X, 1, "GL_SAMPLER_1D_ARRAY_SHADOW"), # 0x8DC3
("", X, 1, "GL_SAMPLER_2D_ARRAY_SHADOW"), # 0x8DC4
("", X, 1, "GL_SAMPLER_CUBE_SHADOW"), # 0x8DC5
("", X, 1, "GL_UNSIGNED_INT_VEC2"), # 0x8DC6
("", X, 1, "GL_UNSIGNED_INT_VEC3"), # 0x8DC7
("", X, 1, "GL_UNSIGNED_INT_VEC4"), # 0x8DC8
("", X, 1, "GL_INT_SAMPLER_1D"), # 0x8DC9
("", X, 1, "GL_INT_SAMPLER_2D"), # 0x8DCA
("", X, 1, "GL_INT_SAMPLER_3D"), # 0x8DCB
("", X, 1, "GL_INT_SAMPLER_CUBE"), # 0x8DCC
("", X, 1, "GL_INT_SAMPLER_2D_RECT"), # 0x8DCD
("", X, 1, "GL_INT_SAMPLER_1D_ARRAY"), # 0x8DCE
("", X, 1, "GL_INT_SAMPLER_2D_ARRAY"), # 0x8DCF
("", X, 1, "GL_INT_SAMPLER_BUFFER"), # 0x8DD0
("", X, 1, "GL_UNSIGNED_INT_SAMPLER_1D"), # 0x8DD1
("", X, 1, "GL_UNSIGNED_INT_SAMPLER_2D"), # 0x8DD2
("", X, 1, "GL_UNSIGNED_INT_SAMPLER_3D"), # 0x8DD3
("", X, 1, "GL_UNSIGNED_INT_SAMPLER_CUBE"), # 0x8DD4
("", X, 1, "GL_UNSIGNED_INT_SAMPLER_2D_RECT"), # 0x8DD5
("", X, 1, "GL_UNSIGNED_INT_SAMPLER_1D_ARRAY"), # 0x8DD6
("", X, 1, "GL_UNSIGNED_INT_SAMPLER_2D_ARRAY"), # 0x8DD7
("", X, 1, "GL_UNSIGNED_INT_SAMPLER_BUFFER"), # 0x8DD8
("glGetProgramPipeline", I, 1, "GL_GEOMETRY_SHADER"), # 0x8DD9
("glGetProgram", I, 1, "GL_GEOMETRY_VERTICES_OUT_ARB"), # 0x8DDA
("glGetProgram", E, 1, "GL_GEOMETRY_INPUT_TYPE_ARB"), # 0x8DDB
("glGetProgram", E, 1, "GL_GEOMETRY_OUTPUT_TYPE_ARB"), # 0x8DDC
("glGet", I, 1, "GL_MAX_GEOMETRY_VARYING_COMPONENTS_ARB"), # 0x8DDD
("glGet", I, 1, "GL_MAX_VERTEX_VARYING_COMPONENTS_ARB"), # 0x8DDE
("glGet", I, 1, "GL_MAX_GEOMETRY_UNIFORM_COMPONENTS"), # 0x8DDF
("glGet", I, 1, "GL_MAX_GEOMETRY_OUTPUT_VERTICES"), # 0x8DE0
("glGet", I, 1, "GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS"), # 0x8DE1
("", X, 1, "GL_MAX_VERTEX_BINDABLE_UNIFORMS_EXT"), # 0x8DE2
("", X, 1, "GL_MAX_FRAGMENT_BINDABLE_UNIFORMS_EXT"), # 0x8DE3
("", X, 1, "GL_MAX_GEOMETRY_BINDABLE_UNIFORMS_EXT"), # 0x8DE4
("", X, 1, "GL_ACTIVE_SUBROUTINES"), # 0x8DE5
("", X, 1, "GL_ACTIVE_SUBROUTINE_UNIFORMS"), # 0x8DE6
("glGet", I, 1, "GL_MAX_SUBROUTINES"), # 0x8DE7
("glGet", I, 1, "GL_MAX_SUBROUTINE_UNIFORM_LOCATIONS"), # 0x8DE8
("glGetNamedString", X, 1, "GL_NAMED_STRING_LENGTH_ARB"), # 0x8DE9
("glGetNamedString", E, 1, "GL_NAMED_STRING_TYPE_ARB"), # 0x8DEA
("glGet", I, 1, "GL_MAX_BINDABLE_UNIFORM_SIZE_EXT"), # 0x8DED
("", X, 1, "GL_UNIFORM_BUFFER_EXT"), # 0x8DEE
("glGet", I, 1, "GL_UNIFORM_BUFFER_BINDING_EXT"), # 0x8DEF
("", X, 1, "GL_LOW_FLOAT"), # 0x8DF0
("", X, 1, "GL_MEDIUM_FLOAT"), # 0x8DF1
("", X, 1, "GL_HIGH_FLOAT"), # 0x8DF2
("", X, 1, "GL_LOW_INT"), # 0x8DF3
("", X, 1, "GL_MEDIUM_INT"), # 0x8DF4
("", X, 1, "GL_HIGH_INT"), # 0x8DF5
("glGet", I, 1, "GL_NUM_SHADER_BINARY_FORMATS"), # 0x8DF9
("glGet", B, 1, "GL_SHADER_COMPILER"), # 0x8DFA
("glGet", I, 1, "GL_MAX_VERTEX_UNIFORM_VECTORS"), # 0x8DFB
("glGet", I, 1, "GL_MAX_VARYING_VECTORS"), # 0x8DFC
("glGet", I, 1, "GL_MAX_FRAGMENT_UNIFORM_VECTORS"), # 0x8DFD
("", X, 1, "GL_RENDERBUFFER_COLOR_SAMPLES_NV"), # 0x8E10
("", X, 1, "GL_MAX_MULTISAMPLE_COVERAGE_MODES_NV"), # 0x8E11
("", X, 1, "GL_MULTISAMPLE_COVERAGE_MODES_NV"), # 0x8E12
("", X, 1, "GL_QUERY_WAIT"), # 0x8E13
("", X, 1, "GL_QUERY_NO_WAIT"), # 0x8E14
("", X, 1, "GL_QUERY_BY_REGION_WAIT"), # 0x8E15
("", X, 1, "GL_QUERY_BY_REGION_NO_WAIT"), # 0x8E16
("glGet", I, 1, "GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS"), # 0x8E1E
("glGet", I, 1, "GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS"), # 0x8E1F
("", X, 1, "GL_COLOR_SAMPLES_NV"), # 0x8E20
("", X, 1, "GL_TRANSFORM_FEEDBACK"), # 0x8E22
("glGet", B, 1, "GL_TRANSFORM_FEEDBACK_BUFFER_PAUSED"), # 0x8E23
("glGet", B, 1, "GL_TRANSFORM_FEEDBACK_BUFFER_ACTIVE"), # 0x8E24
("glGet", I, 1, "GL_TRANSFORM_FEEDBACK_BINDING"), # 0x8E25
("", X, 1, "GL_FRAME_NV"), # 0x8E26
("", X, 1, "GL_FIELDS_NV"), # 0x8E27
("", X, 1, "GL_TIMESTAMP"), # 0x8E28
("", X, 1, "GL_NUM_FILL_STREAMS_NV"), # 0x8E29
("", X, 1, "GL_PRESENT_TIME_NV"), # 0x8E2A
("", X, 1, "GL_PRESENT_DURATION_NV"), # 0x8E2B
("", X, 1, "GL_PROGRAM_MATRIX_EXT"), # 0x8E2D
("", X, 1, "GL_TRANSPOSE_PROGRAM_MATRIX_EXT"), # 0x8E2E
("", X, 1, "GL_PROGRAM_MATRIX_STACK_DEPTH_EXT"), # 0x8E2F
("glGetTexParameter", E, 1, "GL_TEXTURE_SWIZZLE_R"), # 0x8E42
("glGetTexParameter", E, 1, "GL_TEXTURE_SWIZZLE_G"), # 0x8E43
("glGetTexParameter", E, 1, "GL_TEXTURE_SWIZZLE_B"), # 0x8E44
("glGetTexParameter", E, 1, "GL_TEXTURE_SWIZZLE_A"), # 0x8E45
("glGetTexParameter", E, 4, "GL_TEXTURE_SWIZZLE_RGBA"), # 0x8E46
("", X, 1, "GL_ACTIVE_SUBROUTINE_UNIFORM_LOCATIONS"), # 0x8E47
("", X, 1, "GL_ACTIVE_SUBROUTINE_MAX_LENGTH"), # 0x8E48
("", X, 1, "GL_ACTIVE_SUBROUTINE_UNIFORM_MAX_LENGTH"), # 0x8E49
("", I, 1, "GL_NUM_COMPATIBLE_SUBROUTINES"), # 0x8E4A
("", I, 1, "GL_COMPATIBLE_SUBROUTINES"), # 0x8E4B
("glGet", B, 1, "GL_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION"), # 0x8E4C
("", X, 1, "GL_FIRST_VERTEX_CONVENTION"), # 0x8E4D
("", X, 1, "GL_LAST_VERTEX_CONVENTION"), # 0x8E4E
("glGet", E, 1, "GL_PROVOKING_VERTEX"), # 0x8E4F
("glGetMultisample", I, 1, "GL_SAMPLE_POSITION"), # 0x8E50
("glGet", B, 1, "GL_SAMPLE_MASK"), # 0x8E51
("glGet", I, 1, "GL_SAMPLE_MASK_VALUE"), # 0x8E52
("", X, 1, "GL_TEXTURE_BINDING_RENDERBUFFER_NV"), # 0x8E53
("glGet", I, 1, "GL_TEXTURE_RENDERBUFFER_DATA_STORE_BINDING_NV"), # 0x8E54
("", X, 1, "GL_TEXTURE_RENDERBUFFER_NV"), # 0x8E55
("", X, 1, "GL_SAMPLER_RENDERBUFFER_NV"), # 0x8E56
("", X, 1, "GL_INT_SAMPLER_RENDERBUFFER_NV"), # 0x8E57
("", X, 1, "GL_UNSIGNED_INT_SAMPLER_RENDERBUFFER_NV"), # 0x8E58
("glGet", I, 1, "GL_MAX_SAMPLE_MASK_WORDS"), # 0x8E59
("", X, 1, "GL_MAX_GEOMETRY_SHADER_INVOCATIONS"), # 0x8E5A
("", X, 1, "GL_MIN_FRAGMENT_INTERPOLATION_OFFSET"), # 0x8E5B
("", X, 1, "GL_MAX_FRAGMENT_INTERPOLATION_OFFSET"), # 0x8E5C
("", X, 1, "GL_FRAGMENT_INTERPOLATION_OFFSET_BITS"), # 0x8E5D
("", X, 1, "GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET"), # 0x8E5E
("", X, 1, "GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET"), # 0x8E5F
("glGet", I, 1, "GL_MAX_TRANSFORM_FEEDBACK_BUFFERS"), # 0x8E70
("glGet", I, 1, "GL_MAX_VERTEX_STREAMS"), # 0x8E71
("glGet", I, 1, "GL_PATCH_VERTICES"), # 0x8E72
("glGet", F, 2, "GL_PATCH_DEFAULT_INNER_LEVEL"), # 0x8E73
("glGet", F, 4, "GL_PATCH_DEFAULT_OUTER_LEVEL"), # 0x8E74
("glGet", X, 1, "GL_TESS_CONTROL_OUTPUT_VERTICES"), # 0x8E75
("glGet", E, 1, "GL_TESS_GEN_MODE"), # 0x8E76
("glGet", E, 1, "GL_TESS_GEN_SPACING"), # 0x8E77
("glGet", E, 1, "GL_TESS_GEN_VERTEX_ORDER"), # 0x8E78
("glGet", E, 1, "GL_TESS_GEN_POINT_MODE"), # 0x8E79
("", X, 1, "GL_ISOLINES"), # 0x8E7A
("", X, 1, "GL_FRACTIONAL_ODD"), # 0x8E7B
("", X, 1, "GL_FRACTIONAL_EVEN"), # 0x8E7C
("glGet", I, 1, "GL_MAX_PATCH_VERTICES"), # 0x8E7D
("glGet", I, 1, "GL_MAX_TESS_GEN_LEVEL"), # 0x8E7E
("glGet", I, 1, "GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS"), # 0x8E7F
("glGet", I, 1, "GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS"), # 0x8E80
("glGet", I, 1, "GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS"), # 0x8E81
("glGet", I, 1, "GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS"), # 0x8E82
("glGet", I, 1, "GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS"), # 0x8E83
("glGet", I, 1, "GL_MAX_TESS_PATCH_COMPONENTS"), # 0x8E84
("glGet", I, 1, "GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS"), # 0x8E85
("glGet", I, 1, "GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS"), # 0x8E86
("glGetProgramPipeline", I, 1, "GL_TESS_EVALUATION_SHADER"), # 0x8E87
("glGetProgramPipeline", I, 1, "GL_TESS_CONTROL_SHADER"), # 0x8E88
("glGet", I, 1, "GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS"), # 0x8E89
("glGet", I, 1, "GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS"), # 0x8E8A
("", X, 1, "GL_COMPRESSED_RGBA_BPTC_UNORM_ARB"), # 0x8E8C
("", X, 1, "GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_ARB"), # 0x8E8D
("", X, 1, "GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB"), # 0x8E8E
("", X, 1, "GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB"), # 0x8E8F
#("", X, 1, "GL_COVERAGE_COMPONENT_NV"), # 0x8ED0
#("", X, 1, "GL_COVERAGE_COMPONENT4_NV"), # 0x8ED1
#("", X, 1, "GL_COVERAGE_ATTACHMENT_NV"), # 0x8ED2
#("", X, 1, "GL_COVERAGE_BUFFERS_NV"), # 0x8ED3
#("", X, 1, "GL_COVERAGE_SAMPLES_NV"), # 0x8ED4
#("", X, 1, "GL_COVERAGE_ALL_FRAGMENTS_NV"), # 0x8ED5
#("", X, 1, "GL_COVERAGE_EDGE_FRAGMENTS_NV"), # 0x8ED6
#("", X, 1, "GL_COVERAGE_AUTOMATIC_NV"), # 0x8ED7
("", X, 1, "GL_BUFFER_GPU_ADDRESS_NV"), # 0x8F1D
("", X, 1, "GL_VERTEX_ATTRIB_ARRAY_UNIFIED_NV"), # 0x8F1E
("", X, 1, "GL_ELEMENT_ARRAY_UNIFIED_NV"), # 0x8F1F
("", X, 1, "GL_VERTEX_ATTRIB_ARRAY_ADDRESS_NV"), # 0x8F20
("", X, 1, "GL_VERTEX_ARRAY_ADDRESS_NV"), # 0x8F21
("", X, 1, "GL_NORMAL_ARRAY_ADDRESS_NV"), # 0x8F22
("", X, 1, "GL_COLOR_ARRAY_ADDRESS_NV"), # 0x8F23
("", X, 1, "GL_INDEX_ARRAY_ADDRESS_NV"), # 0x8F24
("", X, 1, "GL_TEXTURE_COORD_ARRAY_ADDRESS_NV"), # 0x8F25
("", X, 1, "GL_EDGE_FLAG_ARRAY_ADDRESS_NV"), # 0x8F26
("", X, 1, "GL_SECONDARY_COLOR_ARRAY_ADDRESS_NV"), # 0x8F27
("", X, 1, "GL_FOG_COORD_ARRAY_ADDRESS_NV"), # 0x8F28
("", X, 1, "GL_ELEMENT_ARRAY_ADDRESS_NV"), # 0x8F29
("", X, 1, "GL_VERTEX_ATTRIB_ARRAY_LENGTH_NV"), # 0x8F2A
("", X, 1, "GL_VERTEX_ARRAY_LENGTH_NV"), # 0x8F2B
("", X, 1, "GL_NORMAL_ARRAY_LENGTH_NV"), # 0x8F2C
("", X, 1, "GL_COLOR_ARRAY_LENGTH_NV"), # 0x8F2D
("", X, 1, "GL_INDEX_ARRAY_LENGTH_NV"), # 0x8F2E
("", X, 1, "GL_TEXTURE_COORD_ARRAY_LENGTH_NV"), # 0x8F2F
("", X, 1, "GL_EDGE_FLAG_ARRAY_LENGTH_NV"), # 0x8F30
("", X, 1, "GL_SECONDARY_COLOR_ARRAY_LENGTH_NV"), # 0x8F31
("", X, 1, "GL_FOG_COORD_ARRAY_LENGTH_NV"), # 0x8F32
("", X, 1, "GL_ELEMENT_ARRAY_LENGTH_NV"), # 0x8F33
("", X, 1, "GL_GPU_ADDRESS_NV"), # 0x8F34
("", X, 1, "GL_MAX_SHADER_BUFFER_ADDRESS_NV"), # 0x8F35
("glGet", I, 1, "GL_COPY_READ_BUFFER"), # 0x8F36
("glGet", I, 1, "GL_COPY_WRITE_BUFFER"), # 0x8F37
("", X, 1, "GL_MAX_IMAGE_UNITS"), # 0x8F38
("", X, 1, "GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS"), # 0x8F39
("", X, 1, "GL_IMAGE_BINDING_NAME"), # 0x8F3A
("", X, 1, "GL_IMAGE_BINDING_LEVEL"), # 0x8F3B
("", X, 1, "GL_IMAGE_BINDING_LAYERED"), # 0x8F3C
("", X, 1, "GL_IMAGE_BINDING_LAYER"), # 0x8F3D
("", X, 1, "GL_IMAGE_BINDING_ACCESS"), # 0x8F3E
("", X, 1, "GL_DRAW_INDIRECT_BUFFER"), # 0x8F3F
("", X, 1, "GL_DRAW_INDIRECT_UNIFIED_NV"), # 0x8F40
("", X, 1, "GL_DRAW_INDIRECT_ADDRESS_NV"), # 0x8F41
("", X, 1, "GL_DRAW_INDIRECT_LENGTH_NV"), # 0x8F42
("glGet", I, 1, "GL_DRAW_INDIRECT_BUFFER_BINDING"), # 0x8F43
("", X, 1, "GL_MAX_PROGRAM_SUBROUTINE_PARAMETERS_NV"), # 0x8F44
("", X, 1, "GL_MAX_PROGRAM_SUBROUTINE_NUM_NV"), # 0x8F45
("", X, 1, "GL_DOUBLE_MAT2"), # 0x8F46
("", X, 1, "GL_DOUBLE_MAT3"), # 0x8F47
("", X, 1, "GL_DOUBLE_MAT4"), # 0x8F48
("", X, 1, "GL_DOUBLE_MAT2x3"), # 0x8F49
("", X, 1, "GL_DOUBLE_MAT2x4"), # 0x8F4A
("", X, 1, "GL_DOUBLE_MAT3x2"), # 0x8F4B
("", X, 1, "GL_DOUBLE_MAT3x4"), # 0x8F4C
("", X, 1, "GL_DOUBLE_MAT4x2"), # 0x8F4D
("", X, 1, "GL_DOUBLE_MAT4x3"), # 0x8F4E
("", X, 1, "GL_MALI_SHADER_BINARY_ARM"), # 0x8F60
("", X, 1, "GL_RED_SNORM"), # 0x8F90
("", X, 1, "GL_RG_SNORM"), # 0x8F91
("", X, 1, "GL_RGB_SNORM"), # 0x8F92
("", X, 1, "GL_RGBA_SNORM"), # 0x8F93
("", X, 1, "GL_R8_SNORM"), # 0x8F94
("", X, 1, "GL_RG8_SNORM"), # 0x8F95
("", X, 1, "GL_RGB8_SNORM"), # 0x8F96
("", X, 1, "GL_RGBA8_SNORM"), # 0x8F97
("", X, 1, "GL_R16_SNORM"), # 0x8F98
("", X, 1, "GL_RG16_SNORM"), # 0x8F99
("", X, 1, "GL_RGB16_SNORM"), # 0x8F9A
("", X, 1, "GL_RGBA16_SNORM"), # 0x8F9B
("", X, 1, "GL_SIGNED_NORMALIZED"), # 0x8F9C
("glGet", B, 1, "GL_PRIMITIVE_RESTART"), # 0x8F9D
("glGet", I, 1, "GL_PRIMITIVE_RESTART_INDEX"), # 0x8F9E
#("", X, 1, "GL_MAX_PROGRAM_TEXTURE_GATHER_COMPONENTS"), # 0x8F9F
("", X, 1, "GL_PERFMON_GLOBAL_MODE_QCOM"), # 0x8FA0
("", X, 1, "GL_SHADER_BINARY_VIV"), # 0x8FC4
("", X, 1, "GL_INT8_NV"), # 0x8FE0
("", X, 1, "GL_INT8_VEC2_NV"), # 0x8FE1
("", X, 1, "GL_INT8_VEC3_NV"), # 0x8FE2
("", X, 1, "GL_INT8_VEC4_NV"), # 0x8FE3
("", X, 1, "GL_INT16_NV"), # 0x8FE4
("", X, 1, "GL_INT16_VEC2_NV"), # 0x8FE5
("", X, 1, "GL_INT16_VEC3_NV"), # 0x8FE6
("", X, 1, "GL_INT16_VEC4_NV"), # 0x8FE7
("", X, 1, "GL_INT64_VEC2_NV"), # 0x8FE9
("", X, 1, "GL_INT64_VEC3_NV"), # 0x8FEA
("", X, 1, "GL_INT64_VEC4_NV"), # 0x8FEB
("", X, 1, "GL_UNSIGNED_INT8_NV"), # 0x8FEC
("", X, 1, "GL_UNSIGNED_INT8_VEC2_NV"), # 0x8FED
("", X, 1, "GL_UNSIGNED_INT8_VEC3_NV"), # 0x8FEE
("", X, 1, "GL_UNSIGNED_INT8_VEC4_NV"), # 0x8FEF
("", X, 1, "GL_UNSIGNED_INT16_NV"), # 0x8FF0
("", X, 1, "GL_UNSIGNED_INT16_VEC2_NV"), # 0x8FF1
("", X, 1, "GL_UNSIGNED_INT16_VEC3_NV"), # 0x8FF2
("", X, 1, "GL_UNSIGNED_INT16_VEC4_NV"), # 0x8FF3
("", X, 1, "GL_UNSIGNED_INT64_VEC2_NV"), # 0x8FF5
("", X, 1, "GL_UNSIGNED_INT64_VEC3_NV"), # 0x8FF6
("", X, 1, "GL_UNSIGNED_INT64_VEC4_NV"), # 0x8FF7
("", X, 1, "GL_FLOAT16_NV"), # 0x8FF8
("", X, 1, "GL_FLOAT16_VEC2_NV"), # 0x8FF9
("", X, 1, "GL_FLOAT16_VEC3_NV"), # 0x8FFA
("", X, 1, "GL_FLOAT16_VEC4_NV"), # 0x8FFB
("", X, 1, "GL_DOUBLE_VEC2"), # 0x8FFC
("", X, 1, "GL_DOUBLE_VEC3"), # 0x8FFD
("", X, 1, "GL_DOUBLE_VEC4"), # 0x8FFE
("", X, 1, "GL_SAMPLER_BUFFER_AMD"), # 0x9001
("", X, 1, "GL_INT_SAMPLER_BUFFER_AMD"), # 0x9002
("", X, 1, "GL_UNSIGNED_INT_SAMPLER_BUFFER_AMD"), # 0x9003
("", X, 1, "GL_TESSELLATION_MODE_AMD"), # 0x9004
("", X, 1, "GL_TESSELLATION_FACTOR_AMD"), # 0x9005
("", X, 1, "GL_DISCRETE_AMD"), # 0x9006
("", X, 1, "GL_CONTINUOUS_AMD"), # 0x9007
("_glGet", B, 1, "GL_TEXTURE_CUBE_MAP_ARRAY"), # 0x9009
("_glGet", I, 1, "GL_TEXTURE_BINDING_CUBE_MAP_ARRAY"), # 0x900A
("", X, 1, "GL_PROXY_TEXTURE_CUBE_MAP_ARRAY"), # 0x900B
("", X, 1, "GL_SAMPLER_CUBE_MAP_ARRAY"), # 0x900C
("", X, 1, "GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW"), # 0x900D
("", X, 1, "GL_INT_SAMPLER_CUBE_MAP_ARRAY"), # 0x900E
("", X, 1, "GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY"), # 0x900F
("", X, 1, "GL_ALPHA_SNORM"), # 0x9010
("", X, 1, "GL_LUMINANCE_SNORM"), # 0x9011
("", X, 1, "GL_LUMINANCE_ALPHA_SNORM"), # 0x9012
("", X, 1, "GL_INTENSITY_SNORM"), # 0x9013
("", X, 1, "GL_ALPHA8_SNORM"), # 0x9014
("", X, 1, "GL_LUMINANCE8_SNORM"), # 0x9015
("", X, 1, "GL_LUMINANCE8_ALPHA8_SNORM"), # 0x9016
("", X, 1, "GL_INTENSITY8_SNORM"), # 0x9017
("", X, 1, "GL_ALPHA16_SNORM"), # 0x9018
("", X, 1, "GL_LUMINANCE16_SNORM"), # 0x9019
("", X, 1, "GL_LUMINANCE16_ALPHA16_SNORM"), # 0x901A
("", X, 1, "GL_INTENSITY16_SNORM"), # 0x901B
("", X, 1, "GL_FACTOR_MIN_AMD"), # 0x901C
("", X, 1, "GL_FACTOR_MAX_AMD"), # 0x901D
("", B, 1, "GL_DEPTH_CLAMP_NEAR_AMD"), # 0x901E
("", B, 1, "GL_DEPTH_CLAMP_FAR_AMD"), # 0x901F
("", X, 1, "GL_VIDEO_BUFFER_NV"), # 0x9020
("glGet", I, 1, "GL_VIDEO_BUFFER_BINDING_NV"), # 0x9021
("", X, 1, "GL_FIELD_UPPER_NV"), # 0x9022
("", X, 1, "GL_FIELD_LOWER_NV"), # 0x9023
("", X, 1, "GL_NUM_VIDEO_CAPTURE_STREAMS_NV"), # 0x9024
("", X, 1, "GL_NEXT_VIDEO_CAPTURE_BUFFER_STATUS_NV"), # 0x9025
("", X, 1, "GL_VIDEO_CAPTURE_TO_422_SUPPORTED_NV"), # 0x9026
("", X, 1, "GL_LAST_VIDEO_CAPTURE_STATUS_NV"), # 0x9027
("", X, 1, "GL_VIDEO_BUFFER_PITCH_NV"), # 0x9028
("", X, 1, "GL_VIDEO_COLOR_CONVERSION_MATRIX_NV"), # 0x9029
("", X, 1, "GL_VIDEO_COLOR_CONVERSION_MAX_NV"), # 0x902A
("", X, 1, "GL_VIDEO_COLOR_CONVERSION_MIN_NV"), # 0x902B
("", X, 1, "GL_VIDEO_COLOR_CONVERSION_OFFSET_NV"), # 0x902C
("", X, 1, "GL_VIDEO_BUFFER_INTERNAL_FORMAT_NV"), # 0x902D
("", X, 1, "GL_PARTIAL_SUCCESS_NV"), # 0x902E
("", X, 1, "GL_SUCCESS_NV"), # 0x902F
("", X, 1, "GL_FAILURE_NV"), # 0x9030
("", X, 1, "GL_YCBYCR8_422_NV"), # 0x9031
("", X, 1, "GL_YCBAYCR8A_4224_NV"), # 0x9032
("", X, 1, "GL_Z6Y10Z6CB10Z6Y10Z6CR10_422_NV"), # 0x9033
("", X, 1, "GL_Z6Y10Z6CB10Z6A10Z6Y10Z6CR10Z6A10_4224_NV"), # 0x9034
("", X, 1, "GL_Z4Y12Z4CB12Z4Y12Z4CR12_422_NV"), # 0x9035
("", X, 1, "GL_Z4Y12Z4CB12Z4A12Z4Y12Z4CR12Z4A12_4224_NV"), # 0x9036
("", X, 1, "GL_Z4Y12Z4CB12Z4CR12_444_NV"), # 0x9037
("", X, 1, "GL_VIDEO_CAPTURE_FRAME_WIDTH_NV"), # 0x9038
("", X, 1, "GL_VIDEO_CAPTURE_FRAME_HEIGHT_NV"), # 0x9039
("", X, 1, "GL_VIDEO_CAPTURE_FIELD_UPPER_HEIGHT_NV"), # 0x903A
("", X, 1, "GL_VIDEO_CAPTURE_FIELD_LOWER_HEIGHT_NV"), # 0x903B
("", X, 1, "GL_VIDEO_CAPTURE_SURFACE_ORIGIN_NV"), # 0x903C
("", X, 1, "GL_TEXTURE_COVERAGE_SAMPLES_NV"), # 0x9045
("", X, 1, "GL_TEXTURE_COLOR_SAMPLES_NV"), # 0x9046
("glGet", I, 1, "GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX"), # 0x9047
("glGet", I, 1, "GL_GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX"), # 0x9048
("glGet", I, 1, "GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX"), # 0x9049
("glGet", I, 1, "GL_GPU_MEMORY_INFO_EVICTION_COUNT_NVX"), # 0x904A
("glGet", I, 1, "GL_GPU_MEMORY_INFO_EVICTED_MEMORY_NVX"), # 0x904B
("", X, 1, "GL_IMAGE_1D"), # 0x904C
("", X, 1, "GL_IMAGE_2D"), # 0x904D
("", X, 1, "GL_IMAGE_3D"), # 0x904E
("", X, 1, "GL_IMAGE_2D_RECT"), # 0x904F
("", X, 1, "GL_IMAGE_CUBE"), # 0x9050
("", X, 1, "GL_IMAGE_BUFFER"), # 0x9051
("", X, 1, "GL_IMAGE_1D_ARRAY"), # 0x9052
("", X, 1, "GL_IMAGE_2D_ARRAY"), # 0x9053
("", X, 1, "GL_IMAGE_CUBE_MAP_ARRAY"), # 0x9054
("", X, 1, "GL_IMAGE_2D_MULTISAMPLE"), # 0x9055
("", X, 1, "GL_IMAGE_2D_MULTISAMPLE_ARRAY"), # 0x9056
("", X, 1, "GL_INT_IMAGE_1D"), # 0x9057
("", X, 1, "GL_INT_IMAGE_2D"), # 0x9058
("", X, 1, "GL_INT_IMAGE_3D"), # 0x9059
("", X, 1, "GL_INT_IMAGE_2D_RECT"), # 0x905A
("", X, 1, "GL_INT_IMAGE_CUBE"), # 0x905B
("", X, 1, "GL_INT_IMAGE_BUFFER"), # 0x905C
("", X, 1, "GL_INT_IMAGE_1D_ARRAY"), # 0x905D
("", X, 1, "GL_INT_IMAGE_2D_ARRAY"), # 0x905E
("", X, 1, "GL_INT_IMAGE_CUBE_MAP_ARRAY"), # 0x905F
("", X, 1, "GL_INT_IMAGE_2D_MULTISAMPLE"), # 0x9060
("", X, 1, "GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY"), # 0x9061
("", X, 1, "GL_UNSIGNED_INT_IMAGE_1D"), # 0x9062
("", X, 1, "GL_UNSIGNED_INT_IMAGE_2D"), # 0x9063
("", X, 1, "GL_UNSIGNED_INT_IMAGE_3D"), # 0x9064
("", X, 1, "GL_UNSIGNED_INT_IMAGE_2D_RECT"), # 0x9065
("", X, 1, "GL_UNSIGNED_INT_IMAGE_CUBE"), # 0x9066
("", X, 1, "GL_UNSIGNED_INT_IMAGE_BUFFER"), # 0x9067
("", X, 1, "GL_UNSIGNED_INT_IMAGE_1D_ARRAY"), # 0x9068
("", X, 1, "GL_UNSIGNED_INT_IMAGE_2D_ARRAY"), # 0x9069
("", X, 1, "GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY"), # 0x906A
("", X, 1, "GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE"), # 0x906B
("", X, 1, "GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY"), # 0x906C
("", X, 1, "GL_MAX_IMAGE_SAMPLES"), # 0x906D
("", X, 1, "GL_IMAGE_BINDING_FORMAT"), # 0x906E
("", X, 1, "GL_RGB10_A2UI"), # 0x906F
("", X, 1, "GL_PATH_FORMAT_SVG_NV"), # 0x9070
("", X, 1, "GL_PATH_FORMAT_PS_NV"), # 0x9071
("", X, 1, "GL_STANDARD_FONT_NAME_NV"), # 0x9072
("", X, 1, "GL_SYSTEM_FONT_NAME_NV"), # 0x9073
("", X, 1, "GL_FILE_NAME_NV"), # 0x9074
("", X, 1, "GL_PATH_STROKE_WIDTH_NV"), # 0x9075
("", X, 1, "GL_PATH_END_CAPS_NV"), # 0x9076
("", X, 1, "GL_PATH_INITIAL_END_CAP_NV"), # 0x9077
("", X, 1, "GL_PATH_TERMINAL_END_CAP_NV"), # 0x9078
("", X, 1, "GL_PATH_JOIN_STYLE_NV"), # 0x9079
("", X, 1, "GL_PATH_MITER_LIMIT_NV"), # 0x907A
("", X, 1, "GL_PATH_DASH_CAPS_NV"), # 0x907B
("", X, 1, "GL_PATH_INITIAL_DASH_CAP_NV"), # 0x907C
("", X, 1, "GL_PATH_TERMINAL_DASH_CAP_NV"), # 0x907D
("", X, 1, "GL_PATH_DASH_OFFSET_NV"), # 0x907E
("", X, 1, "GL_PATH_CLIENT_LENGTH_NV"), # 0x907F
("", X, 1, "GL_PATH_FILL_MODE_NV"), # 0x9080
("", X, 1, "GL_PATH_FILL_MASK_NV"), # 0x9081
("", X, 1, "GL_PATH_FILL_COVER_MODE_NV"), # 0x9082
("", X, 1, "GL_PATH_STROKE_COVER_MODE_NV"), # 0x9083
("", X, 1, "GL_PATH_STROKE_MASK_NV"), # 0x9084
("", X, 1, "GL_PATH_SAMPLE_QUALITY_NV"), # 0x9085
("", X, 1, "GL_PATH_STROKE_BOUND_NV"), # 0x9086
("", X, 1, "GL_PATH_STROKE_OVERSAMPLE_COUNT_NV"), # 0x9087
("", X, 1, "GL_COUNT_UP_NV"), # 0x9088
("", X, 1, "GL_COUNT_DOWN_NV"), # 0x9089
("", X, 1, "GL_PATH_OBJECT_BOUNDING_BOX_NV"), # 0x908A
("", X, 1, "GL_CONVEX_HULL_NV"), # 0x908B
("", X, 1, "GL_MULTI_HULLS_NV"), # 0x908C
("", X, 1, "GL_BOUNDING_BOX_NV"), # 0x908D
("", X, 1, "GL_TRANSLATE_X_NV"), # 0x908E
("", X, 1, "GL_TRANSLATE_Y_NV"), # 0x908F
("", X, 1, "GL_TRANSLATE_2D_NV"), # 0x9090
("", X, 1, "GL_TRANSLATE_3D_NV"), # 0x9091
("", X, 1, "GL_AFFINE_2D_NV"), # 0x9092
("", X, 1, "GL_PROJECTIVE_2D_NV"), # 0x9093
("", X, 1, "GL_AFFINE_3D_NV"), # 0x9094
("", X, 1, "GL_PROJECTIVE_3D_NV"), # 0x9095
("", X, 1, "GL_TRANSPOSE_AFFINE_2D_NV"), # 0x9096
("", X, 1, "GL_TRANSPOSE_PROJECTIVE_2D_NV"), # 0x9097
("", X, 1, "GL_TRANSPOSE_AFFINE_3D_NV"), # 0x9098
("", X, 1, "GL_TRANSPOSE_PROJECTIVE_3D_NV"), # 0x9099
("", X, 1, "GL_UTF8_NV"), # 0x909A
("", X, 1, "GL_UTF16_NV"), # 0x909B
("", X, 1, "GL_BOUNDING_BOX_OF_BOUNDING_BOXES_NV"), # 0x909C
("", X, 1, "GL_PATH_COMMAND_COUNT_NV"), # 0x909D
("", X, 1, "GL_PATH_COORD_COUNT_NV"), # 0x909E
("", X, 1, "GL_PATH_DASH_ARRAY_COUNT_NV"), # 0x909F
("", X, 1, "GL_PATH_COMPUTED_LENGTH_NV"), # 0x90A0
("", X, 1, "GL_PATH_FILL_BOUNDING_BOX_NV"), # 0x90A1
("", X, 1, "GL_PATH_STROKE_BOUNDING_BOX_NV"), # 0x90A2
("", X, 1, "GL_SQUARE_NV"), # 0x90A3
("", X, 1, "GL_ROUND_NV"), # 0x90A4
("", X, 1, "GL_TRIANGULAR_NV"), # 0x90A5
("", X, 1, "GL_BEVEL_NV"), # 0x90A6
("", X, 1, "GL_MITER_REVERT_NV"), # 0x90A7
("", X, 1, "GL_MITER_TRUNCATE_NV"), # 0x90A8
("", X, 1, "GL_SKIP_MISSING_GLYPH_NV"), # 0x90A9
("", X, 1, "GL_USE_MISSING_GLYPH_NV"), # 0x90AA
("", X, 1, "GL_PATH_ERROR_POSITION_NV"), # 0x90AB
("", X, 1, "GL_PATH_FOG_GEN_MODE_NV"), # 0x90AC
("", X, 1, "GL_ACCUM_ADJACENT_PAIRS_NV"), # 0x90AD
("", X, 1, "GL_ADJACENT_PAIRS_NV"), # 0x90AE
("", X, 1, "GL_FIRST_TO_REST_NV"), # 0x90AF
("", X, 1, "GL_PATH_GEN_MODE_NV"), # 0x90B0
("", X, 1, "GL_PATH_GEN_COEFF_NV"), # 0x90B1
("", X, 1, "GL_PATH_GEN_COLOR_FORMAT_NV"), # 0x90B2
("", X, 1, "GL_PATH_GEN_COMPONENTS_NV"), # 0x90B3
("", X, 1, "GL_PATH_DASH_OFFSET_RESET_NV"), # 0x90B4
("", X, 1, "GL_MOVE_TO_RESETS_NV"), # 0x90B5
("", X, 1, "GL_MOVE_TO_CONTINUES_NV"), # 0x90B6
("", X, 1, "GL_PATH_STENCIL_FUNC_NV"), # 0x90B7
("", X, 1, "GL_PATH_STENCIL_REF_NV"), # 0x90B8
("", X, 1, "GL_PATH_STENCIL_VALUE_MASK_NV"), # 0x90B9
("", X, 1, "GL_SCALED_RESOLVE_FASTEST_EXT"), # 0x90BA
("", X, 1, "GL_SCALED_RESOLVE_NICEST_EXT"), # 0x90BB
("", X, 1, "GL_MIN_MAP_BUFFER_ALIGNMENT"), # 0x90BC
("", X, 1, "GL_PATH_STENCIL_DEPTH_OFFSET_FACTOR_NV"), # 0x90BD
("", X, 1, "GL_PATH_STENCIL_DEPTH_OFFSET_UNITS_NV"), # 0x90BE
("", X, 1, "GL_PATH_COVER_DEPTH_FUNC_NV"), # 0x90BF
("", X, 1, "GL_IMAGE_FORMAT_COMPATIBILITY_TYPE"), # 0x90C7
("", X, 1, "GL_IMAGE_FORMAT_COMPATIBILITY_BY_SIZE"), # 0x90C8
("", X, 1, "GL_IMAGE_FORMAT_COMPATIBILITY_BY_CLASS"), # 0x90C9
("", X, 1, "GL_MAX_VERTEX_IMAGE_UNIFORMS"), # 0x90CA
("", X, 1, "GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS"), # 0x90CB
("", X, 1, "GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS"), # 0x90CC
("", X, 1, "GL_MAX_GEOMETRY_IMAGE_UNIFORMS"), # 0x90CD
("", X, 1, "GL_MAX_FRAGMENT_IMAGE_UNIFORMS"), # 0x90CE
("", X, 1, "GL_MAX_COMBINED_IMAGE_UNIFORMS"), # 0x90CF
("", I, 1, "GL_SHADER_STORAGE_BUFFER"), # 0x90D2
("", I, 1, "GL_SHADER_STORAGE_BUFFER_BINDING"), # 0x90D3
("", I, 1, "GL_SHADER_STORAGE_BUFFER_START"), # 0x90D4
("", I, 1, "GL_SHADER_STORAGE_BUFFER_SIZE"), # 0x90D5
("", I, 1, "GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS"), # 0x90D6
("", I, 1, "GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS"), # 0x90D7
("", I, 1, "GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS"), # 0x90D8
("", I, 1, "GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS"), # 0x90D9
("", I, 1, "GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS"), # 0x90DA
("glGet", I, 1, "GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS"), # 0x90DB
("", I, 1, "GL_MAX_COMBINED_SHADER_STORAGE_BLOCKS"), # 0x90DC
("", I, 1, "GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS"), # 0x90DD
("", I, 1, "GL_MAX_SHADER_STORAGE_BLOCK_SIZE"), # 0x90DE
("", X, 1, "GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT"), # 0x90DF
("", X, 1, "GL_SYNC_X11_FENCE_EXT"), # 0x90E1
("glGetTexParameter", E, 1, "GL_DEPTH_STENCIL_TEXTURE_MODE"), # 0x90EA
("glGet", I, 1, "GL_MAX_COMPUTE_LOCAL_INVOCATIONS"), # 0x90EB
("", X, 1, "GL_UNIFORM_BLOCK_REFERENCED_BY_COMPUTE_SHADER"), # 0x90EC
("", X, 1, "GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_COMPUTE_SHADER"), # 0x90ED
("", X, 1, "GL_DISPATCH_INDIRECT_BUFFER"), # 0x90EE
("glGet", I, 1, "GL_DISPATCH_INDIRECT_BUFFER_BINDING"), # 0x90EF
("", X, 1, "GL_TEXTURE_2D_MULTISAMPLE"), # 0x9100
("", X, 1, "GL_PROXY_TEXTURE_2D_MULTISAMPLE"), # 0x9101
("", X, 1, "GL_TEXTURE_2D_MULTISAMPLE_ARRAY"), # 0x9102
("", X, 1, "GL_PROXY_TEXTURE_2D_MULTISAMPLE_ARRAY"), # 0x9103
("_glGet", B, 1, "GL_TEXTURE_BINDING_2D_MULTISAMPLE"), # 0x9104
("_glGet", I, 1, "GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY"), # 0x9105
("glGetTexLevelParameter", I, 1, "GL_TEXTURE_SAMPLES"), # 0x9106
("glGetTexLevelParameter", B, 1, "GL_TEXTURE_FIXED_SAMPLE_LOCATIONS"), # 0x9107
("", X, 1, "GL_SAMPLER_2D_MULTISAMPLE"), # 0x9108
("", X, 1, "GL_INT_SAMPLER_2D_MULTISAMPLE"), # 0x9109
("", X, 1, "GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE"), # 0x910A
("", X, 1, "GL_SAMPLER_2D_MULTISAMPLE_ARRAY"), # 0x910B
("", X, 1, "GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY"), # 0x910C
("", X, 1, "GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY"), # 0x910D
("glGet", I, 1, "GL_MAX_COLOR_TEXTURE_SAMPLES"), # 0x910E
("glGet", I, 1, "GL_MAX_DEPTH_TEXTURE_SAMPLES"), # 0x910F
("glGet", I, 1, "GL_MAX_INTEGER_SAMPLES"), # 0x9110
("_glGet", I64, 1, "GL_MAX_SERVER_WAIT_TIMEOUT"), # 0x9111
("", X, 1, "GL_OBJECT_TYPE"), # | |
core of the gaussian.
# We take this to be the 'external' part of the scan range
# work out the external part mask
with warnings.catch_warnings(record=True) as _:
external_mask = np.abs(drange) > ccf_scan_range / 2
# calculate and subtract external part
external_water = np.nanmedian(ccf_water[external_mask])
ccf_water = ccf_water - external_water
external_others = np.nanmedian(ccf_others[external_mask])
ccf_others = ccf_others - external_others
# ------------------------------------------------------------------
# get the amplitude of the middle of the CCF
# work out the internal part mask
internal_mask = np.abs(drange) < ccf_scan_range / 4
amp_water = np.nansum(ccf_water[internal_mask])
if not force_airmass:
amp_others = np.nansum(ccf_others[internal_mask])
else:
amp_others = 0.0
# ------------------------------------------------------------------
# count the number of NaNs in the CCF
num_nan_ccf = np.sum(~np.isfinite(ccf_water))
# if CCF is NaN do not continue
if num_nan_ccf > 0:
# update qc params
qc_values[1] = num_nan_ccf
qc_pass[1] = 0
# flag qc as failed and break
flag_qc = True
break
else:
qc_values[1] = num_nan_ccf
qc_pass[1] = 1
# ------------------------------------------------------------------
# we measure absorption velocity by fitting a gaussian to the
# absorption profile. This updates the dv_abso value for the
# next steps.
# if this is the first iteration then fit the absorption velocity
if iteration == 0:
# make a guess for the water fit parameters (for curve fit)
water_guess = [np.nanmin(ccf_water), 0, 4]
        # fit the ccf_water with a gaussian
popt, pcov = curve_fit(mp.gauss_function_nodc, drange, ccf_water,
p0=water_guess)
# store the velocity of the water
dv_water = popt[1]
# make a guess of the others fit parameters (for curve fit)
others_guess = [np.nanmin(ccf_water), 0, 4]
# fit the ccf_others with a gaussian
popt, pconv = curve_fit(mp.gauss_function_nodc, drange, ccf_others,
p0=others_guess)
# store the velocity of the other species
dv_others = popt[1]
# store the mean velocity of water and others
dv_abso = np.mean([dv_water, dv_others])
# ------------------------------------------------------------------
# store the amplitudes of current exponent values
# for other species
if not force_airmass:
amp_others_list.append(amp_others)
expo_others_list.append(expo_others)
# for water
amp_water_list.append(amp_water)
expo_water_list.append(expo_water)
# ------------------------------------------------------------------
# if this is the first iteration force the values of
# expo_others and expo water
if iteration == 0:
# header value to be used
expo_others = float(hdr_airmass)
# default value for water
expo_water = float(default_water_abso)
# ------------------------------------------------------------------
# else we fit the amplitudes with polynomial fits
else:
# --------------------------------------------------------------
# set value for fit_others
fit_others = [np.nan, hdr_airmass, np.nan]
# convert lists to arrays
amp_others_arr = np.array(amp_others_list)
expo_others_arr = np.array(expo_others_list)
amp_water_arr = np.array(amp_water_list)
expo_water_arr = np.array(expo_water_list)
# if we have over 5 iterations we fit a 2nd order polynomial
# to the lowest 5 amplitudes
if iteration > 5:
if not force_airmass:
# get others lists as array and sort them
sortmask = np.argsort(np.abs(amp_others_arr))
amp_others_arr = amp_others_arr[sortmask]
expo_others_arr = expo_others_arr[sortmask]
# polyfit lowest 5 others terms
fit_others = np.polyfit(amp_others_arr[0: 4],
expo_others_arr[0:4], 1)
# get water lists as arrays and sort them
sortmask = np.argsort(np.abs(amp_water_arr))
amp_water_arr = amp_water_arr[sortmask]
expo_water_arr = expo_water_arr[sortmask]
# polyfit lowest 5 water terms
fit_water = np.polyfit(amp_water_arr[0:4],
expo_water_arr[0:4], 1)
# else just fit a line
else:
if not force_airmass:
fit_others = np.polyfit(amp_others_arr, expo_others_arr, 1)
fit_water = np.polyfit(amp_water_arr, expo_water_arr, 1)
# --------------------------------------------------------------
# find best guess for other species exponent
expo_others = float(fit_others[1])
# deal with lower bounds for other species
if expo_others < others_bounds[0]:
# update qc params
qc_values[2] = float(fit_others[1])
qc_pass[2] = 0
# set expo_others to lower others bound
expo_others = float(others_bounds[0])
# flag qc as failed and break
flag_qc = True
else:
qc_values[2] = float(fit_others[1])
qc_pass[2] = 1
# deal with upper bounds for other species
if expo_others > others_bounds[1]:
# update qc params
qc_values[3] = float(fit_others[1])
qc_pass[3] = 0
# set the expo_others to the upper others bound
expo_others = float(others_bounds[1])
# flag qc as failed and break
flag_qc = True
else:
qc_values[3] = float(fit_others[1])
qc_pass[3] = 1
# --------------------------------------------------------------
# find best guess for water exponent
expo_water = float(fit_water[1])
# deal with lower bounds for water
if expo_water < water_bounds[0]:
# update qc params
qc_values[4] = float(fit_water[1])
qc_pass[4] = 0
# set the expo_water to the lower water bound
expo_water = float(water_bounds[0])
# flag qc as failed and break
flag_qc = True
else:
qc_values[4] = float(fit_water[1])
qc_pass[4] = 1
# deal with upper bounds for water
if expo_water > water_bounds[1]:
# update qc params
qc_values[5] = float(fit_water[1])
qc_pass[5] = 0
# set the expo_water to the upper water bound
expo_water = float(water_bounds[1])
# flag qc as failed and break
flag_qc = True
else:
qc_values[5] = float(fit_water[1])
qc_pass[5] = 1
# --------------------------------------------------------------
# check whether we have converged yet (by updating dexpo)
if force_airmass:
dexpo = np.abs(expo_water_prev - expo_water)
else:
part1 = expo_water_prev - expo_water
part2 = expo_others_prev - expo_others
dexpo = np.sqrt(part1 ** 2 + part2 ** 2)
# break if qc flag True don't try to converge
if flag_qc:
break
# --------------------------------------------------------------
# keep track of the convergence params
expo_water_prev = float(expo_water)
expo_others_prev = float(expo_others)
# ------------------------------------------------------------------
# storage for plotting
dd_iterations.append(drange)
ccf_water_iterations.append(np.array(ccf_water))
ccf_others_iterations.append(np.array(ccf_others))
# ------------------------------------------------------------------
# finally add one to the iterator
iteration += 1
# ----------------------------------------------------------------------
# deal with iterations hitting the max (no convergence)
if iteration == max_iterations - 1:
# update qc params
qc_values[6] = iteration
qc_pass[6] = 0
flag_qc = True
else:
qc_values[6] = iteration
qc_pass[6] = 1
# ----------------------------------------------------------------------
# deal with the qc flags
if flag_qc:
# log that qc flagged
for qit in range(len(qc_pass)):
if qc_pass[qit] == 0:
wargs = [qc_logic[qit], qc_names[qit], qc_values[qit]]
wmsg = 'Pre cleaning failed. \n\tCriteria: {0} \n\tActual: {1} = {2}'
WLOG(params, 'warning', wmsg.format(*wargs))
qc_params = [qc_names, qc_values, qc_logic, qc_pass]
# return qc_exit_tellu_preclean
return qc_exit_tellu_preclean(params, recipe, image_e2ds, infile,
wave_e2ds, qc_params, sky_model)
# ----------------------------------------------------------------------
# show CCF plot to see if correlation peaks have been killed
recipe.plot('TELLUP_WAVE_TRANS', dd_arr=dd_iterations,
ccf_water_arr=ccf_water_iterations,
ccf_others_arr=ccf_others_iterations)
recipe.plot('SUM_TELLUP_WAVE_TRANS', dd_arr=dd_iterations,
ccf_water_arr=ccf_water_iterations,
ccf_others_arr=ccf_others_iterations)
# plot to show absorption spectrum
recipe.plot('TELLUP_ABSO_SPEC', trans=trans, wave=wavemap,
thres=trans_thres, spectrum=spectrum, spectrum_ini=spectrum_ini,
objname=infile.get_key('KW_OBJNAME', dtype=str),
clean_ohlines=clean_ohlines)
recipe.plot('SUM_TELLUP_ABSO_SPEC', trans=trans, wave=wavemap,
thres=trans_thres, spectrum=spectrum, spectrum_ini=spectrum_ini,
objname=infile.get_key('KW_OBJNAME', dtype=str),
clean_ohlines=clean_ohlines)
# ----------------------------------------------------------------------
# create qc_params (all passed now but we have updated values)
qc_params = [qc_names, qc_values, qc_logic, qc_pass]
# ----------------------------------------------------------------------
# get the final absorption spectrum to be used on the science data.
# No trimming done on the wave grid
abso_e2ds = get_abso_expo(params, wave_e2ds, expo_others, expo_water,
spl_others, spl_water, ww=ker_width,
ex_gau=ker_shape, dv_abso=0.0,
ker_thres=ker_thres, wavestart=wavestart,
waveend=waveend, dvgrid=dvgrid)
# all absorption deeper than exp(trans_thres) is considered too deep to
# be corrected. We set values there to NaN
mask = abso_e2ds < np.exp(2 * trans_thres)
# set deep lines to NaN
abso_e2ds[mask] = np.nan
# ----------------------------------------------------------------------
# now correct the original e2ds file
corrected_e2ds = (image_e2ds_ini - sky_model) / abso_e2ds
# ----------------------------------------------------------------------
# calculate CCF power
keep = np.abs(drange) < (ccf_scan_range / 4)
water_ccfpower = np.nansum(np.gradient(ccf_water[keep] ** 2))
others_ccfpower = np.nansum(np.gradient(ccf_others)[keep] ** 2)
# ----------------------------------------------------------------------
# populate parameter dictionary
props = ParamDict()
props['CORRECTED_E2DS'] = corrected_e2ds
props['TRANS_MASK'] = mask
props['ABSO_E2DS'] = abso_e2ds
props['SKY_MODEL'] = sky_model
props['EXPO_WATER'] = expo_water
props['EXPO_OTHERS'] = expo_others
props['DV_WATER'] = dv_water
props['DV_OTHERS'] = dv_others
props['CCFPOWER_WATER'] = water_ccfpower
props['CCFPOWER_OTHERS'] = others_ccfpower
props['QC_PARAMS'] = qc_params
# set sources
keys = ['CORRECTED_E2DS', 'TRANS_MASK', 'ABSO_E2DS', 'EXPO_WATER',
'EXPO_OTHERS', 'DV_WATER', 'DV_OTHERS', 'CCFPOWER_WATER',
'CCFPOWER_OTHERS', 'QC_PARAMS', 'SKY_MODEL']
props.set_sources(keys, func_name)
# ----------------------------------------------------------------------
# add constants used (can come from kwargs)
props['TELLUP_DO_PRECLEANING'] = do_precleaning
props['TELLUP_D_WATER_ABSO'] = default_water_abso
props['TELLUP_CCF_SCAN_RANGE'] = ccf_scan_range
props['TELLUP_CLEAN_OH_LINES'] = clean_ohlines
props['TELLUP_REMOVE_ORDS'] = remove_orders
props['TELLUP_SNR_MIN_THRES'] = snr_min_thres
props['TELLUP_DEXPO_CONV_THRES'] = dexpo_thres
props['TELLUP_DEXPO_MAX_ITR'] = max_iterations
props['TELLUP_ABSO_EXPO_KWID'] = ker_width
props['TELLUP_ABSO_EXPO_KEXP'] = ker_shape
props['TELLUP_TRANS_THRES'] = trans_thres
props['TELLUP_TRANS_SIGLIM'] = trans_siglim
props['TELLUP_FORCE_AIRMASS'] = force_airmass
props['TELLUP_OTHER_BOUNDS'] = others_bounds
props['TELLUP_WATER_BOUNDS'] = water_bounds
props['TELLUP_ABSO_EXPO_KTHRES'] = ker_thres
props['TELLUP_WAVE_START'] = wavestart
props['TELLUP_WAVE_END'] = waveend
props['TELLUP_DVGRID'] = dvgrid
# set sources
keys = ['TELLUP_D_WATER_ABSO', 'TELLUP_CCF_SCAN_RANGE',
'TELLUP_CLEAN_OH_LINES', 'TELLUP_REMOVE_ORDS',
'TELLUP_SNR_MIN_THRES', 'TELLUP_DEXPO_CONV_THRES',
'TELLUP_DEXPO_MAX_ITR', 'TELLUP_ABSO_EXPO_KWID',
'TELLUP_ABSO_EXPO_KEXP', 'TELLUP_TRANS_THRES',
'TELLUP_TRANS_SIGLIM', 'TELLUP_FORCE_AIRMASS',
'TELLUP_OTHER_BOUNDS', 'TELLUP_WATER_BOUNDS',
'TELLUP_ABSO_EXPO_KTHRES', 'TELLUP_WAVE_START',
'TELLUP_WAVE_END', 'TELLUP_DVGRID', 'TELLUP_DO_PRECLEANING']
props.set_sources(keys, func_name)
# ----------------------------------------------------------------------
# save pre-cleaned file
tellu_preclean_write(params, recipe, infile, rawfiles, fiber, combine,
props, wprops)
# ----------------------------------------------------------------------
# return props
return props
def clean_ohline_pca(params, image, wavemap, **kwargs):
# load ohline principle components
func_name = __NAME__ + '.clean_ohline_pca()'
# ----------------------------------------------------------------------
# get parameters from params/kwargs
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import context as nova_context
from nova import exception
from nova import objects
from nova.scheduler import weights
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import integrated_helpers
from nova.tests.unit import fake_notifier
from nova.tests.unit.image import fake as fake_image
from nova import utils
class HostNameWeigher(weights.BaseHostWeigher):
    """Weigher that deterministically prefers host1 over host2.

    TestMultiCellMigrate creates host1 in cell1 and host2 in cell2.
    Something about migrating from host1 to host2 teases out failures
    which probably has to do with cell1 being the default cell DB in
    our base test class setup, so prefer host1 to make the tests
    deterministic.
    """
    _weights = {'host1': 100, 'host2': 50}

    def _weigh_object(self, host_state, weight_properties):
        """Return the configured weight for the host; unknown hosts get 0."""
        weight = self._weights.get(host_state.host)
        return 0 if weight is None else weight
class TestMultiCellMigrate(integrated_helpers.ProviderUsageBaseTestCase):
"""Tests for cross-cell cold migration (resize)"""
NUMBER_OF_CELLS = 2
compute_driver = 'fake.MediumFakeDriver'
def setUp(self):
# Use our custom weigher defined above to make sure that we have
# a predictable scheduling sort order during server create.
self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
group='filter_scheduler')
super(TestMultiCellMigrate, self).setUp()
self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))
self._enable_cross_cell_resize()
self.created_images = [] # list of image IDs created during resize
# Adjust the polling interval and timeout for long RPC calls.
self.flags(rpc_response_timeout=1)
self.flags(long_rpc_timeout=60)
# Set up 2 compute services in different cells
self.host_to_cell_mappings = {
'host1': 'cell1', 'host2': 'cell2'}
for host in sorted(self.host_to_cell_mappings):
cell_name = self.host_to_cell_mappings[host]
# Start the compute service on the given host in the given cell.
self._start_compute(host, cell_name=cell_name)
# Create an aggregate where the AZ name is the cell name.
agg_id = self._create_aggregate(
cell_name, availability_zone=cell_name)
# Add the host to the aggregate.
body = {'add_host': {'host': host}}
self.admin_api.post_aggregate_action(agg_id, body)
def _enable_cross_cell_resize(self):
# Enable cross-cell resize policy since it defaults to not allow
# anyone to perform that type of operation. For these tests we'll
# just allow admins to perform cross-cell resize.
# TODO(mriedem): Uncomment this when the policy rule is added and
# used in the compute API _allow_cross_cell_resize method. For now
# we just stub that method to return True.
# self.policy_fixture.set_rules({
# servers_policies.CROSS_CELL_RESIZE:
# base_policies.RULE_ADMIN_API},
# overwrite=False)
self.stub_out('nova.compute.api.API._allow_cross_cell_resize',
lambda *a, **kw: True)
def assertFlavorMatchesAllocation(self, flavor, allocation,
volume_backed=False):
self.assertEqual(flavor['vcpus'], allocation['VCPU'])
self.assertEqual(flavor['ram'], allocation['MEMORY_MB'])
# Volume-backed instances won't have DISK_GB allocations.
if volume_backed:
self.assertNotIn('DISK_GB', allocation)
else:
self.assertEqual(flavor['disk'], allocation['DISK_GB'])
def assert_instance_fields_match_flavor(self, instance, flavor):
self.assertEqual(instance.memory_mb, flavor['ram'])
self.assertEqual(instance.vcpus, flavor['vcpus'])
self.assertEqual(instance.root_gb, flavor['disk'])
self.assertEqual(
instance.ephemeral_gb, flavor['OS-FLV-EXT-DATA:ephemeral'])
def _count_volume_attachments(self, server_id):
attachment_ids = self.cinder.attachment_ids_for_instance(server_id)
return len(attachment_ids)
def assert_quota_usage(self, expected_num_instances):
limits = self.api.get_limits()['absolute']
self.assertEqual(expected_num_instances, limits['totalInstancesUsed'])
def _create_server(self, flavor, volume_backed=False):
"""Creates a server and waits for it to be ACTIVE
:param flavor: dict form of the flavor to use
:param volume_backed: True if the server should be volume-backed
:returns: server dict response from the GET /servers/{server_id} API
"""
# Provide a VIF tag for the pre-existing port. Since VIF tags are
# stored in the virtual_interfaces table in the cell DB, we want to
# make sure those survive the resize to another cell.
networks = [{
'port': self.neutron.port_1['id'],
'tag': 'private'
}]
image_uuid = fake_image.get_valid_image_id()
server = self._build_minimal_create_server_request(
self.api, 'test_cross_cell_resize',
image_uuid=image_uuid,
flavor_id=flavor['id'],
networks=networks)
# Put a tag on the server to make sure that survives the resize.
server['tags'] = ['test']
if volume_backed:
bdms = [{
'boot_index': 0,
'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL,
'source_type': 'volume',
'destination_type': 'volume',
'tag': 'root'
}]
server['block_device_mapping_v2'] = bdms
# We don't need the imageRef for volume-backed servers.
server.pop('imageRef', None)
server = self.api.post_server({'server': server})
server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
# For volume-backed make sure there is one attachment to start.
if volume_backed:
self.assertEqual(1, self._count_volume_attachments(server['id']),
self.cinder.volume_to_attachment)
return server
def stub_image_create(self):
"""Stubs the _FakeImageService.create method to track created images"""
original_create = self.image_service.create
def image_create_snooper(*args, **kwargs):
image = original_create(*args, **kwargs)
self.created_images.append(image['id'])
return image
_p = mock.patch.object(
self.image_service, 'create', side_effect=image_create_snooper)
_p.start()
self.addCleanup(_p.stop)
def _resize_and_validate(self, volume_backed=False, stopped=False,
target_host=None):
"""Creates and resizes the server to another cell. Validates various
aspects of the server and its related records (allocations, migrations,
actions, VIF tags, etc).
:param volume_backed: True if the server should be volume-backed, False
if image-backed.
:param stopped: True if the server should be stopped prior to resize,
False if the server should be ACTIVE
:param target_host: If not None, triggers a cold migration to the
specified host.
:returns: tuple of:
- server response object
- source compute node resource provider uuid
- target compute node resource provider uuid
- old flavor
- new flavor
"""
# Create the server.
flavors = self.api.get_flavors()
old_flavor = flavors[0]
server = self._create_server(old_flavor, volume_backed=volume_backed)
original_host = server['OS-EXT-SRV-ATTR:host']
image_uuid = None if volume_backed else server['image']['id']
# Our HostNameWeigher ensures the server starts in cell1, so we expect
# the server AZ to be cell1 as well.
self.assertEqual('cell1', server['OS-EXT-AZ:availability_zone'])
if stopped:
# Stop the server before resizing it.
self.api.post_server_action(server['id'], {'os-stop': None})
self._wait_for_state_change(self.api, server, 'SHUTOFF')
# Before resizing make sure quota usage is only 1 for total instances.
self.assert_quota_usage(expected_num_instances=1)
if target_host:
# Cold migrate the server to the target host.
new_flavor = old_flavor # flavor does not change for cold migrate
body = {'migrate': {'host': target_host}}
expected_host = target_host
else:
# Resize it which should migrate the server to the host in the
# other cell.
new_flavor = flavors[1]
body = {'resize': {'flavorRef': new_flavor['id']}}
expected_host = 'host1' if original_host == 'host2' else 'host2'
self.stub_image_create()
self.api.post_server_action(server['id'], body)
# Wait for the server to be resized and then verify the host has
# changed to be the host in the other cell.
server = self._wait_for_state_change(self.api, server, 'VERIFY_RESIZE')
self.assertEqual(expected_host, server['OS-EXT-SRV-ATTR:host'])
# Assert that the instance is only listed one time from the API (to
# make sure it's not listed out of both cells).
# Note that we only get one because the DB API excludes hidden
# instances by default (see instance_get_all_by_filters_sort).
servers = self.api.get_servers()
self.assertEqual(1, len(servers),
'Unexpected number of servers: %s' % servers)
self.assertEqual(expected_host, servers[0]['OS-EXT-SRV-ATTR:host'])
# And that there is only one migration record.
migrations = self.api.api_get(
'/os-migrations?instance_uuid=%s' % server['id']
).body['migrations']
self.assertEqual(1, len(migrations),
'Unexpected number of migrations records: %s' %
migrations)
migration = migrations[0]
self.assertEqual('finished', migration['status'])
# There should be at least two actions, one for create and one for the
# resize. There will be a third action if the server was stopped.
actions = self.api.api_get(
'/servers/%s/os-instance-actions' % server['id']
).body['instanceActions']
expected_num_of_actions = 3 if stopped else 2
self.assertEqual(expected_num_of_actions, len(actions), actions)
# Each action should have events (make sure these were copied from
# the source cell to the target cell).
for action in actions:
detail = self.api.api_get(
'/servers/%s/os-instance-actions/%s' % (
server['id'], action['request_id'])).body['instanceAction']
self.assertNotEqual(0, len(detail['events']), detail)
# The tag should still be present on the server.
self.assertEqual(1, len(server['tags']),
'Server tags not found in target cell.')
self.assertEqual('test', server['tags'][0])
# Confirm the source node has allocations for the old flavor and the
# target node has allocations for the new flavor.
source_rp_uuid = self._get_provider_uuid_by_host(original_host)
# The source node allocations should be on the migration record.
source_allocations = self._get_allocations_by_provider_uuid(
source_rp_uuid)[migration['uuid']]['resources']
self.assertFlavorMatchesAllocation(
old_flavor, source_allocations, volume_backed=volume_backed)
target_rp_uuid = self._get_provider_uuid_by_host(expected_host)
# The target node allocations should be on the instance record.
target_allocations = self._get_allocations_by_provider_uuid(
target_rp_uuid)[server['id']]['resources']
self.assertFlavorMatchesAllocation(
new_flavor, target_allocations, volume_backed=volume_backed)
# The instance, in the target cell DB, should have the old and new
# flavor stored with it with the values we expect at this point.
target_cell_name = self.host_to_cell_mappings[expected_host]
self.assertEqual(
target_cell_name, server['OS-EXT-AZ:availability_zone'])
target_cell = self.cell_mappings[target_cell_name]
admin_context = nova_context.get_admin_context()
with nova_context.target_cell(admin_context, target_cell) as cctxt:
inst = objects.Instance.get_by_uuid(
cctxt, server['id'], expected_attrs=['flavor'])
self.assertIsNotNone(
inst.old_flavor,
'instance.old_flavor not saved in target cell')
self.assertIsNotNone(
inst.new_flavor,
'instance.new_flavor not saved in target cell')
self.assertEqual(inst.flavor.flavorid, inst.new_flavor.flavorid)
if target_host: # cold migrate so flavor does not change
self.assertEqual(
inst.flavor.flavorid, inst.old_flavor.flavorid)
else:
self.assertNotEqual(
inst.flavor.flavorid, inst.old_flavor.flavorid)
self.assertEqual(old_flavor['id'], inst.old_flavor.flavorid)
self.assertEqual(new_flavor['id'], inst.new_flavor.flavorid)
# Assert the ComputeManager._set_instance_info fields
# are | |
<reponame>cfgoldsmith/RMG-Py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2019 Prof. <NAME> (<EMAIL>), #
# Prof. <NAME> (<EMAIL>) and the RMG Team (<EMAIL>) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
import math
import logging
import os.path
import numpy
import rmgpy.constants as constants
from rmgpy.exceptions import InputError
from rmgpy.statmech import IdealGasTranslation, NonlinearRotor, LinearRotor, HarmonicOscillator, Conformer
from arkane.common import check_conformer_energy, get_element_mass
from arkane.log import Log
################################################################################
class QChemLog(Log):
"""
Represent an output file from QChem. The attribute `path` refers to the
location on disk of the QChem output file of interest. Methods are provided
to extract a variety of information into Arkane classes and/or NumPy
arrays.
"""
def __init__(self, path):
    """Initialize the parser with the path to a QChem output file.

    :param path: Location on disk of the QChem output file of interest.
    """
    self.path = path
def getNumberOfAtoms(self):
    """
    Return the number of atoms in the molecular configuration used in
    the QChem output file.

    The count is taken from the first "Standard Nuclear Orientation"
    table found in the file; 0 is returned if no such table is present.
    """
    Natoms = 0
    # Use a context manager so the file is closed even if parsing raises
    # (the original open()/close() pair leaked the handle on exceptions).
    with open(self.path, 'r') as f:
        line = f.readline()
        while line != '' and Natoms == 0:
            # Automatically determine the number of atoms
            if 'Standard Nuclear Orientation' in line:
                # Skip the table header (three lines) before counting rows
                for i in range(3):
                    line = f.readline()
                # Count atom rows until the closing dashed separator
                while '----------------------------------------------------' not in line:
                    Natoms += 1
                    line = f.readline()
            line = f.readline()
    return Natoms
def loadForceConstantMatrix(self):
    """
    Return the force constant matrix (in Cartesian coordinates) from the
    QChem log file. If multiple such matrices are identified,
    only the last is returned. The units of the returned force constants
    are J/m^2. If no force constant matrix can be found in the log file,
    ``None`` is returned.
    """
    F = None
    Natoms = self.getNumberOfAtoms()
    # Three Cartesian components per atom
    Nrows = Natoms * 3
    f = open(self.path, 'r')
    line = f.readline()
    while line != '':
        # Read force constant matrix
        if 'Final Hessian.' in line or 'Hessian of the SCF Energy' in line:
            F = numpy.zeros((Nrows,Nrows), numpy.float64)
            # QChem prints the Hessian in column blocks of (at most) 6
            for i in range(int(math.ceil(Nrows / 6.0))):
                # Header row (column indices) — discard
                line = f.readline()
                # Matrix element rows; first field is the row index
                for j in range(Nrows): #for j in range(i*6, Nrows):
                    data = f.readline().split()
                    for k in range(len(data)-1):
                        F[j,i*6+k] = float(data[k+1])
                        #F[i*5+k,j] = F[j,i*5+k]
            # Convert from atomic units (Hartree/Bohr_radius^2) to J/m^2
            F *= 4.35974417e-18 / 5.291772108e-11**2
        line = f.readline()
    # Close file when finished
    f.close()
    return F
def loadGeometry(self):
    """
    Return the optimum geometry of the molecular configuration from the
    QChem log file. If multiple such geometries are identified, only the
    last is returned.

    :return: ``(coord, number, mass)`` NumPy arrays of Cartesian
        coordinates, atomic numbers, and atomic masses (one entry per atom).
    :raises InputError: if the log file does not correspond to a
        successfully completed QChem job, or if no geometry can be parsed.
    """
    atom, coord, number, mass = [], [], [], []
    with open(self.path) as f:
        log = f.read().splitlines()
    # First check that the QChem job file (not necessarily a geometry optimization)
    # has successfully completed, if not an error is thrown
    completed_job = False
    for line in reversed(log):
        if 'Total job time:' in line:
            logging.debug('Found a successfully completed QChem Job')
            completed_job = True
            break
    if not completed_job:
        raise InputError('Could not find a successfully completed QChem job in QChem output file {0}'.format(self.path))
    # Now look for the geometry.
    # Will return the final geometry in the file under Standard Nuclear Orientation.
    geometry_flag = False
    # range() instead of Python 2's xrange() so this also runs on Python 3
    for i in reversed(range(len(log))):
        line = log[i]
        if 'Standard Nuclear Orientation' in line:
            # Table data starts three lines below the section header
            for line in log[(i + 3):]:
                if '------------' not in line:
                    data = line.split()
                    atom.append(data[1])
                    coord.append([float(c) for c in data[2:]])
                    geometry_flag = True
                else:
                    break
            if geometry_flag:
                break
    # Assign appropriate mass to each atom in the molecule
    for atom1 in atom:
        mass1, num1 = get_element_mass(atom1)
        mass.append(mass1)
        number.append(num1)
    coord = numpy.array(coord, numpy.float64)
    # Builtin int instead of the deprecated (and now removed) numpy.int alias
    number = numpy.array(number, int)
    mass = numpy.array(mass, numpy.float64)
    if len(number) == 0 or len(coord) == 0 or len(mass) == 0:
        raise InputError('Unable to read atoms from QChem geometry output file {0}'.format(self.path))
    return coord, number, mass
def loadConformer(self, symmetry=None, spinMultiplicity=0, opticalIsomers=None, label=''):
    """
    Load the molecular degree of freedom data from an output file created as the result of a
    QChem "Freq" calculation. As QChem's guess of the external symmetry number is not always correct,
    you can use the `symmetry` parameter to substitute your own value;
    if not provided, the value in the QChem output file will be adopted.

    :param symmetry: external symmetry number override (optional)
    :param spinMultiplicity: spin multiplicity; 0 means "read it from the $molecule block"
    :param opticalIsomers: number of optical isomers override (optional)
    :param label: conformer label used only in debug logging
    :return: a ``(Conformer, unscaled_frequencies)`` tuple
    """
    # Accumulators for the three mode families; `modes` is rebuilt at the end
    modes = []; freq = []; mmass = []; rot = []; inertia = []
    unscaled_frequencies = []
    E0 = 0.0
    if opticalIsomers is None or symmetry is None:
        _opticalIsomers, _symmetry = self.get_optical_isomers_and_symmetry_number()
        if opticalIsomers is None:
            opticalIsomers = _opticalIsomers
        if symmetry is None:
            symmetry = _symmetry
    f = open(self.path, 'r')
    line = f.readline()
    while line != '':
        # Read spin multiplicity if not explicitly given
        if '$molecule' in line and spinMultiplicity == 0:
            line = f.readline()
            if len(line.split()) == 2:
                # Second field of the "$molecule" charge/multiplicity line
                spinMultiplicity = int(float(line.split()[1]))
                logging.debug('Conformer {0} is assigned a spin multiplicity of {1}'.format(label,spinMultiplicity))
        # The rest of the data we want is in the Thermochemistry section of the output
        elif 'VIBRATIONAL ANALYSIS' in line:
            modes = []
            line = f.readline()
            while line != '':
                # This marks the end of the thermochemistry section
                if 'Thank you very much for using Q-Chem.' in line:
                    break
                # Read vibrational modes
                elif 'VIBRATIONAL FREQUENCIES (CM**-1)' in line:
                    frequencies = []
                    while 'STANDARD THERMODYNAMIC QUANTITIES AT' not in line:
                        # Frequency lines carry up to three values each
                        if ' Frequency:' in line:
                            if len(line.split()) == 4:
                                frequencies.extend([float(d) for d in line.split()[-3:]])
                            elif len(line.split()) == 3:
                                frequencies.extend([float(d) for d in line.split()[-2:]])
                            elif len(line.split()) == 2:
                                frequencies.extend([float(d) for d in line.split()[-1:]])
                        line = f.readline()
                    line = f.readline()
                    # If there is an imaginary frequency, remove it
                    if frequencies[0] < 0.0:
                        frequencies = frequencies[1:]
                    unscaled_frequencies = frequencies
                    vibration = HarmonicOscillator(frequencies=(frequencies,"cm^-1"))
                    # modes.append(vibration)
                    freq.append(vibration)
                # Read molecular mass for external translational modes
                elif 'Molecular Mass:' in line:
                    mass = float(line.split()[2])
                    translation = IdealGasTranslation(mass=(mass,"amu"))
                    # modes.append(translation)
                    mmass.append(translation)
                # Read moments of inertia for external rotational modes, given in atomic units
                elif 'Eigenvalues --' in line:
                    inertia = [float(d) for d in line.split()[-3:]]
                # Read the next line in the file
                line = f.readline()
        # Read the next line in the file
        line = f.readline()
        # Process any pending inertia eigenvalues into a rotor mode
        if len(inertia):
            if inertia[0] == 0.0:
                # If the first eigenvalue is 0, the rotor is linear
                inertia.remove(0.0)
                logging.debug('inertia is {}'.format(str(inertia)))
                # Convert from Bohr^2-based units to amu*angstrom^2
                for i in range(2):
                    inertia[i] *= (constants.a0 / 1e-10) ** 2
                # Linear rotors use the geometric mean of the two eigenvalues
                inertia = numpy.sqrt(inertia[0] * inertia[1])
                rotation = LinearRotor(inertia=(inertia, "amu*angstrom^2"), symmetry=symmetry)
                rot.append(rotation)
            else:
                for i in range(3):
                    inertia[i] *= (constants.a0 / 1e-10) ** 2
                rotation = NonlinearRotor(inertia=(inertia, "amu*angstrom^2"), symmetry=symmetry)
                # modes.append(rotation)
                rot.append(rotation)
            # Reset so the same eigenvalues are not processed twice
            inertia = []
    # Close file when finished
    f.close()
    # Canonical ordering: translation, rotation, vibration
    modes = mmass + rot + freq
    # NOTE(review): E0 is never read from the file here, so the Conformer is
    # built with E0 = 0; the energy is loaded separately via loadEnergy().
    return Conformer(E0=(E0*0.001,"kJ/mol"), modes=modes, spinMultiplicity=spinMultiplicity,
                     opticalIsomers=opticalIsomers), unscaled_frequencies
def loadEnergy(self, frequencyScaleFactor=1.):
"""
Load the energy in J/mol from a QChem log file. Only the last energy
in the file is returned. The zero-point energy is *not* included in
the returned value.
"""
e0 = None
with open(self.path, 'r') as f:
a = b = 0
for line in f:
if 'Final energy is' in line:
a = float(line.split()[3]) * constants.E_h * constants.Na
if 'Total energy in the final basis set' in | |
<reponame>anonconda/TranSuite<filename>lib/transfix/transfix_tools.py<gh_stars>10-100
import os
import sys
import time
from collections import namedtuple, defaultdict
from Bio.Seq import Seq
from lib.parsing.gtf_object_tools import create_gtf_object
from lib.transfix.gene_overlap_module import get_overlap_percentage, group_transcripts_by_overlap
from lib.findlorf.findlorf_tools import convert_from_genomic_to_relative, split_cds
def write_table(my_list, to_keep, filename, sep=","):
    """Write the rows of `my_list` whose transcript ID is in `to_keep` to `filename`.

    Each item must be a `sep`-separated row whose LAST field is the transcript
    ID. A header line is always written, and rows are written in sorted order.

    :param my_list: iterable of row strings (chromosome, strand, gene ID, transcript ID)
    :param to_keep: collection of transcript IDs to retain
    :param filename: path of the output file (overwritten)
    :param sep: field separator used for the header and to parse each row
    """
    # with-statement guarantees the handle is closed even if a write fails
    # (the original open()/close() pair leaked it on exceptions); write() is
    # the correct call for a single string — writelines() was misused here.
    with open(filename, 'w') as outfile:
        outfile.write(f"Chromosome{sep}Strand{sep}Gene_ID{sep}Transcript_ID\n")
        for item in sorted(my_list):
            # Table row format: chromosome strand gene_ID transcript_ID
            *_, trans_id = item.strip("\n").split(sep)
            if trans_id in to_keep:
                outfile.write(item + '\n')
def is_inside_an_exon(coordinate, exon_list):
    """Return True if `coordinate` falls inside at least one exon (inclusive bounds).

    :param coordinate: genomic position to test
    :param exon_list: iterable of (start, end) exon coordinate pairs
    :return: True if the coordinate lies within any exon, False otherwise
    """
    # TODO check if I can use this method to check the generate start-stop codons in the "gtf_object_tools" file
    # any() short-circuits on the first hit, replacing the original's two
    # intermediate True/False lists with a single lazy pass.
    return any(exon[0] <= coordinate <= exon[-1] for exon in exon_list)
def find_cds_end(nuc_seq, cds_start, trans_id, trans_sense, trans_exons):
    """Locate the genomic coordinate of the CDS end (stop) for a transcript.

    Walks genomic positions from `cds_start` in the reading direction of
    `trans_sense`, counting only positions inside exons, until the coding
    length implied by the translated sequence is exhausted.

    :param nuc_seq: Bio.Seq-like coding sequence (must support translate(to_stop=True))
    :param cds_start: genomic coordinate of the ATG
    :param trans_id: transcript ID, used only in the error message
    :param trans_sense: strand, "+" or "-"
    :param trans_exons: list of (start, end) exon coordinate pairs
    :return: genomic coordinate of the CDS end
    """
    # Important! The to_stop=True is super duper important to get the correct length, and thus correct CDS end coord
    aa_seq = nuc_seq.translate(to_stop=True)
    # Maps relative coding position -> genomic coordinate
    res_dt = {}
    flat = lambda l: [e for sub in l for e in sub]
    if trans_sense == "+":
        # The translated AA sequence doesn't contain the stop codon,
        # therefore we add the missing +3 nucleotides to its length
        aa_len = len(aa_seq)*3 + 3
        max_exon = max(flat(trans_exons))
        aa_ix = 0
        for genomic_ix in range(cds_start, max_exon+1):
            # Only positions inside exons advance the coding index
            if is_inside_an_exon(genomic_ix, trans_exons):
                if aa_ix < aa_len:
                    res_dt[aa_ix] = genomic_ix
                    aa_ix += 1
                else:
                    break
        max_relative = max(res_dt.keys())
        cds_end = res_dt[max_relative]
        # If CDS end is outside the transcript model, set transcript end as the CDS end
        # NOTE(review): res_dt values are always <= max_exon, so this clamp
        # looks unreachable — confirm before relying on it.
        if cds_end > max_exon:
            cds_end = max_exon
        # In some cases the CDS end is inside an intron, due to the stop codon being
        # created by nucleotides at the edge of two exons; correct the position here.
        # NOTE(review): res_dt only ever stores in-exon positions, so this branch
        # also appears unreachable; moreover `exon1_end + cds_offset` below
        # re-computes cds_end unchanged (a no-op). The "-" strand branch uses the
        # opposite exon, suggesting `exon2_st + cds_offset` may have been
        # intended — verify against upstream expectations before changing.
        if not is_inside_an_exon(cds_end, trans_exons):
            for i in range(0, len(trans_exons)-1):
                exon1_st, exon1_end = trans_exons[i]
                exon2_st, exon2_end = trans_exons[i+1]
                # Check if CDS end falls inside an intron
                if exon1_end < cds_end < exon2_st:
                    # Calculate the amount by which the cds exceed the boundary and calculate the corrected CDS end pos
                    cds_offset = cds_end - exon1_end
                    cds_end = exon1_end + cds_offset
                    break
    elif trans_sense == "-":
        aa_len = len(aa_seq)*3 + 3
        min_exon = min(flat(trans_exons))
        aa_ix = 0
        # For "-" strand transcripts the sequence is read right to left, therefore we decrease the genomic_ix value
        for genomic_ix in range(cds_start, min_exon-1, -1):
            if is_inside_an_exon(genomic_ix, trans_exons):
                if aa_ix < aa_len:
                    res_dt[aa_ix] = genomic_ix
                    aa_ix += 1
                else:
                    break
        max_relative = max(res_dt.keys())
        cds_end = res_dt[max_relative]
        # Clamp to the transcript's leftmost coordinate (see NOTE above on reachability)
        if cds_end < min_exon:
            cds_end = min_exon
        if not is_inside_an_exon(cds_end, trans_exons):
            for i in range(0, len(trans_exons)-1):
                exon1_st, exon1_end = trans_exons[i]
                exon2_st, exon2_end = trans_exons[i+1]
                if exon1_end < cds_end < exon2_st:
                    # Due to the "-" strand transcripts being read right to left,
                    # the offset calculation is mirrored: carry the overshoot into exon1
                    cds_offset = exon2_st - cds_end
                    cds_end = exon1_end - cds_offset
                    break
    else:
        sys.exit(f"Transcript {trans_id} sense must be either + or -, not {trans_sense}")
    return cds_end
def remove_transcripts_without_cds(gtf_file, outfolder):
    """Write a copy of `gtf_file` with all transcripts lacking CDS annotation removed.

    :param gtf_file: path of the input GTF annotation file
    :param outfolder: directory where the filtered temporary GTF is written
    :return: ``(gtf_path, trans_with_cds, trans_without_cds)`` — the filtered
        file's path and the sets of kept/removed transcript IDs
    """
    print(time.asctime(), "Removing invalid transcripts models from annotation file")
    gtf_obj = create_gtf_object(gtf_file)
    # Partition transcripts by presence of CDS annotation
    trans_with_cds, trans_without_cds = (set() for _ in range(2))
    for trans, trans_cds in gtf_obj.trans_cds_dt.items():
        if not trans_cds:
            trans_without_cds.add(trans)
        else:
            trans_with_cds.add(trans)
    gtf_rows = []
    with open(gtf_file) as fh:
        for line in fh:
            row = line.strip('\n').split('\t')
            gtf_rows.append(row)
    # Keep only rows whose transcript has a CDS
    filtered_rows = []
    for row in gtf_rows:
        attr = row[-1]
        # Extract the transcript ID from the GTF attribute column
        trans_id = attr.strip("\n").split("transcript_id \"")[-1].split("\";")[0]
        if trans_id in trans_without_cds:
            continue
        else:
            filtered_rows.append(row)
    # Adding the ".transfix.temp." to the name assure that this file will be removed with the other temporary files
    gtf_name = os.path.basename(gtf_file).replace(".gtf", ".transfix.temp.gtf")
    gtf_path = os.path.join(outfolder, gtf_name)
    with open(gtf_path, "w+") as fh:
        # BUG FIX: the original wrote the unfiltered `gtf_rows` here, so the
        # CDS-less transcripts were never actually removed from the output.
        for row in filtered_rows:
            line = "\t".join(row)+"\n"
            fh.write(line)
    return gtf_path, trans_with_cds, trans_without_cds
def get_transcript_data_from_gff_obj(gene_id, locus_dict, trans_sequences_dt):
    """Collect per-transcript data for a gene from a GFF-style locus dictionary.

    :param gene_id: ID of the gene whose transcripts are gathered
    :param locus_dict: dict of gene ID -> gene model exposing ``transcript_dict``
    :param trans_sequences_dt: dict of transcript ID -> nucleotide sequence;
        transcripts absent from it get an empty sequence
    :return: dict mapping transcript IDs to TransData namedtuples
    """
    TransData = namedtuple('TransData', 'chrom sense gene id exons start end seq')
    transcripts = locus_dict[gene_id].transcript_dict
    transdata_dt = {}
    for t_id, model in transcripts.items():
        exon_pairs = model.exon_list
        transdata_dt[t_id] = TransData(
            chrom=model.chrom,
            sense=model.sense,
            gene=model.gene,
            id=t_id,
            exons=exon_pairs,
            # Transcript boundaries are the first and last exon coordinates
            start=exon_pairs[0][0],
            end=exon_pairs[-1][-1],
            seq=trans_sequences_dt.get(t_id, ""),
        )
    return transdata_dt
def get_transcript_data_from_gtf_obj(gene_id, gtf_obj, trans_sequences_dt):
    """Collect per-transcript data for a gene from a GTF object.

    :param gene_id: ID of the gene whose transcripts are gathered
    :param gtf_obj: GTF object exposing the per-transcript lookup dictionaries
    :param trans_sequences_dt: dict of transcript ID -> nucleotide sequence;
        transcripts absent from it get an empty sequence
    :return: dict mapping transcript IDs to TransData namedtuples
    """
    TransData = namedtuple('TransData', 'chrom sense gene id exons start end seq')
    transdata_dt = {}
    for t_id in gtf_obj.gene_trans_dt[gene_id]:
        exon_pairs = gtf_obj.trans_exons_dt[t_id]
        transdata_dt[t_id] = TransData(
            chrom=gtf_obj.trans_chrom_dt[t_id],
            sense=gtf_obj.trans_sense_dt[t_id],
            gene=gtf_obj.trans_gene_dt[t_id],
            id=t_id,
            exons=exon_pairs,
            # Transcript boundaries are the first and last exon coordinates
            start=exon_pairs[0][0],
            end=exon_pairs[-1][-1],
            seq=trans_sequences_dt.get(t_id, ""),
        )
    return transdata_dt
def fix_atg_position(transcript_data_dt, atg_pos):
    """Re-annotate each transcript's CDS so that it starts at the shared ATG `atg_pos`.

    Transcripts are classified into acceptance/rejection categories; accepted
    transcripts get an updated CDS coordinate list, CDS sequence, and FASTA
    header.

    :param transcript_data_dt: dict of transcript ID -> TransData namedtuple
        (see get_transcript_data_from_gtf_obj / get_transcript_data_from_gff_obj)
    :param atg_pos: genomic coordinate of the ATG to fix, or None if not found
    :return: ``(output_dt, cat_dt)`` — output_dt carries "trans_cds_dt",
        "trans_cds_seq_dt", and "trans_header_dt"; cat_dt maps category names
        to sets of transcript IDs (or report lines for *_lines keys)
    """
    # Classification categories
    cat_dt = defaultdict(set)
    output_dt = defaultdict(dict)
    trans_cds_dt, trans_cds_seq_dt, trans_header_dt = [{} for _ in range(3)]
    for transcript_id in transcript_data_dt:
        # trans_data is a namedtuple
        trans_data = transcript_data_dt[transcript_id]
        trans_chrom = trans_data.chrom
        trans_sense = trans_data.sense
        trans_gene = trans_data.gene
        trans_id = trans_data.id
        trans_exons = trans_data.exons
        trans_start = trans_data.start
        trans_end = trans_data.end
        trans_seq = trans_data.seq
        # Reject: no ATG was identified for this gene at all
        if atg_pos is None:
            cat_dt["cds_not_found"].add(transcript_id)
            line = f"{trans_chrom},{trans_sense},{trans_gene},{trans_id}"
            cat_dt["cds_not_found_lines"].add(line)
            continue
        # Reject: no nucleotide sequence available for this transcript
        if not trans_seq:
            cat_dt["seq_not_present"].add(transcript_id)
            line = f"{trans_chrom},{trans_sense},{trans_gene},{trans_id}"
            cat_dt["seq_not_present_lines"].add(line)
            continue
        # Reject: the ATG lies outside the transcript's genomic span
        if not trans_start <= atg_pos <= trans_end:
            cat_dt["atg_not_in_cds"].add(transcript_id)
            line = f"{trans_chrom},{trans_sense},{trans_gene},{trans_id}"
            cat_dt["atg_not_in_cds_lines"].add(line)
            continue
        # Genomic coordinate -> position within the spliced transcript sequence
        lookup_table = convert_from_genomic_to_relative(trans_exons)
        # Check if the start CDS is contained within an exon
        if atg_pos not in set(lookup_table.keys()):
            cat_dt["atg_not_in_cds"].add(transcript_id)
            line = f"{trans_chrom},{trans_sense},{trans_gene},{trans_id}"
            cat_dt["atg_not_in_cds_lines"].add(line)
            continue
        cds_index = lookup_table[atg_pos]
        seq = Seq(trans_seq)
        # Get CDS section of the sequence (from start)
        cds_seq = seq[cds_index:]
        # The sequences in the FASTA file are already reverse_complemented for transcripts in the - strand;
        # that is, they are annotated as read by the molecular machinery (right-to-left, complementary
        # nucleotides), while cds_index represents the position as read left-to-right. The transformations
        # below cut the sequence at the proper position for - strand transcripts.
        if trans_sense == '-':
            seq = seq.reverse_complement()
            cds_seq = seq[:cds_index+1]
            cds_seq = cds_seq.reverse_complement()
        if cds_seq.startswith('ATG'):
            trans_cds_seq_dt[transcript_id] = cds_seq
            # Translate only transcripts starting with 'ATG'
            peptide = True
        else:
            peptide = False
        # Reject: the sequence at the chosen position does not start with ATG
        if not peptide:
            cat_dt["rejected_start_codons"].add(atg_pos)
            cat_dt["start_codon_not_atg"].add(transcript_id)
            line = f"{trans_chrom},{trans_sense},{trans_gene},{trans_id}"
            cat_dt["start_codon_not_atg_lines"].add(line)
            continue
        else:
            # Get information to annotate the updated CDS coordinates
            cds_end = find_cds_end(cds_seq, atg_pos, transcript_id, trans_sense, trans_exons)
            # cds_pair order is important for re-annotation (cds_1 < cds_2)
            cds_1, cds_2 = min({atg_pos, cds_end}), max({atg_pos, cds_end})
            cds_pair = (cds_1, cds_2)
            trans_cds_list = split_cds(cds_pair, trans_exons, transcript_id)
            trans_cds_dt[transcript_id] = trans_cds_list
            # Information for the transcripts header in fasta file
            trans_header = f">{transcript_id} | {trans_gene} | {trans_chrom}:{atg_pos}-{cds_end}"
            trans_header_dt[transcript_id] = trans_header
        # Track processed transcripts to ignore them in the next iteration
        cat_dt["processed_transcripts"].add(transcript_id)
    output_dt["trans_cds_dt"] = trans_cds_dt
    output_dt["trans_cds_seq_dt"] = trans_cds_seq_dt
    output_dt["trans_header_dt"] = trans_header_dt
    return output_dt, cat_dt
def get_gene_longest_CDS(gene_id, gtf_obj):
    """Return the CDS segment list with the greatest total length among a gene's transcripts.

    Transcripts without CDS annotation count as length 0; ties are resolved in
    favour of the transcript iterated last. Returns [] if no transcript has a CDS.
    """
    def cds_length(cds_list):
        # Total span of all CDS segments, inclusive of both end coordinates
        return sum(max(pair) - min(pair) + 1 for pair in cds_list)

    longest_cds = []
    longest_len = 0
    for trans_id in gtf_obj.gene_trans_dt[gene_id]:
        candidate = gtf_obj.trans_cds_dt.get(trans_id, [])
        candidate_len = cds_length(candidate)
        if candidate_len >= longest_len:
            longest_len = candidate_len
            longest_cds = candidate
    return longest_cds
def get_gene_groups(gtf_obj, chimeric_genes=None):
if not chimeric_genes:
print("WARNING: No set of chimeric genes specified!")
chimeric_genes = set()
get_cds_len = lambda cds_list: sum([max(cds_pair)-min(cds_pair)+1 for cds_pair in cds_list])
get_gene_ids = lambda t_group: set([gtf_obj.trans_gene_dt[t_id] for t_id in t_group])
gene_groups_dt = {}
for chrom, strand_transcripts in gtf_obj.chrom_trans_dt.items():
# Strand is important to identify what is the "first" gene that makes up a chimeric
chrom_strand = chrom[-1]
if chrom_strand not in {"+", "-"}:
print(f'WARNING: Skipping Scaffold "{chrom}" due to unrecognized strand.')
overlapping_transcripts = group_transcripts_by_overlap(gtf_obj, strand_transcripts)
for overlap_group in overlapping_transcripts:
gene_ids = get_gene_ids(overlap_group)
# Remove chimeric genes from the results (values) but not the keys!
gene_group = [g_id for g_id in gene_ids if g_id not in chimeric_genes]
# Sort by length of the CDS
gene_ids = sorted(gene_ids, key=lambda g_id: get_cds_len(get_gene_longest_CDS(g_id, gtf_obj)))
for g_id in gene_ids:
# Remove self-references (Gene ID don't need to be in its group, as it is the dict | |
<reponame>aronhelser/cumulus<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2015 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from tests import base
import json
from girder.utility.model_importer import ModelImporter
def setUpModule():
    """Enable the cumulus plugin and start the Girder test server for this module."""
    base.enabledPlugins.append('cumulus')
    base.startServer()
def tearDownModule():
    """Stop the Girder test server once all tests in this module have run."""
    base.stopServer()
class JobTestCase(base.TestCase):
def setUp(self):
    """Create the test fixtures: a 'cumulus' user, two regular users, and a 'cumulus' group."""
    super(JobTestCase, self).setUp()
    # Three accounts: the privileged 'cumulus' account and two ordinary users
    users = ({
        'email': '<EMAIL>',
        'login': 'cumulus',
        'firstName': 'First',
        'lastName': 'Last',
        'password': '<PASSWORD>'
    }, {
        'email': '<EMAIL>',
        'login': 'regularuser',
        'firstName': 'First',
        'lastName': 'Last',
        'password': '<PASSWORD>'
    }, {
        'email': '<EMAIL>',
        'login': 'another',
        'firstName': 'First',
        'lastName': 'Last',
        'password': '<PASSWORD>'
    })
    self._cumulus, self._user, self._another_user = \
        [ModelImporter.model('user').createUser(**user) for user in users]
    self._group = ModelImporter.model('group').createGroup('cumulus', self._cumulus)
def test_create(self):
    """Exercise POST /jobs: name validation, script access control, and scriptId validation."""
    body = {
        'onComplete': {
            'cluster': 'terminate'
        },
        'input': [
            {
                'itemId': '546a1844ff34c70456111185',
                'path': ''
            }
        ],
        'commands': [
            ''
        ],
        'name': '',
        'output': [{
            'itemId': '546a1844ff34c70456111185'
        }]
    }
    json_body = json.dumps(body)
    r = self.request('/jobs', method='POST',
                     type='application/json', body=json_body, user=self._user)
    # Name can't be empty
    self.assertStatus(r, 400)
    # Now try with valid name
    body['name'] = 'testing'
    json_body = json.dumps(body)
    r = self.request('/jobs', method='POST',
                     type='application/json', body=json_body, user=self._user)
    self.assertStatus(r, 201)
    # Drop the generated id so the full document can be compared
    del r.json['_id']
    expected_job = {u'status': u'created', u'userId': str(self._user['_id']), u'commands': [u''], u'name': u'testing', u'onComplete': {u'cluster': u'terminate'}, u'output': [{
        u'itemId': u'546a1844ff34c70456111185'}], u'input': [{u'itemId': u'546a1844ff34c70456111185', u'path': u''}]}
    self.assertEqual(r.json, expected_job)
    body = {
        'onComplete': {
            'cluster': 'terminate'
        },
        'input': [
            {
                'itemId': '546a1844ff34c70456111185',
                'path': ''
            }
        ],
        'name': 'test',
        'output': {
            'itemId': '546a1844ff34c70456111185'
        }
    }
    json_body = json.dumps(body)
    r = self.request('/jobs', method='POST',
                     type='application/json', body=json_body, user=self._user)
    # Must provide commands or jobId
    self.assertStatus(r, 400)
    # Create test script
    body = {
        'commands': ['echo "test"'],
        'name': 'test'
    }
    body = json.dumps(body)
    r = self.request('/scripts', method='POST',
                     type='application/json', body=body, user=self._cumulus)
    self.assertStatus(r, 201)
    self.assertEqual(r.json['commands'], ['echo "test"'])
    self.assertEqual(r.json['name'], 'test')
    script_id = r.json['_id']
    # Create job using script
    body = {
        'onComplete': {
            'cluster': 'terminate'
        },
        'onTerminate': {
            'scriptId': script_id
        },
        'input': [
            {
                'itemId': '546a1844ff34c70456111185',
                'path': ''
            }
        ],
        'scriptId': script_id,
        'name': 'test',
        'output': [{
            'itemId': '546a1844ff34c70456111185'
        }]
    }
    json_body = json.dumps(body)
    r = self.request('/jobs', method='POST',
                     type='application/json', body=json_body, user=self._user)
    # Doesn't have access to script
    self.assertStatus(r, 403)
    # Create with correct user
    r = self.request('/jobs', method='POST',
                     type='application/json', body=json_body, user=self._cumulus)
    self.assertStatus(r, 201)
    # The script's commands are expanded into the job document
    expected_job = {u'status': u'created', u'userId': str(self._cumulus['_id']), u'commands': [u'echo "test"'], u'name': u'test', u'onComplete': {u'cluster': u'terminate'}, u'onTerminate': {'commands': [u'echo "test"']}, u'output': [{
        u'itemId': u'546a1844ff34c70456111185'}], u'input': [{u'itemId': u'546a1844ff34c70456111185', u'path': u''}]}
    del r.json['_id']
    self.assertEqual(r.json, expected_job)
    body = {
        'onComplete': {
            'cluster': 'terminate'
        },
        'input': [
            {
                'itemId': '546a1844ff34c70456111185',
                'path': ''
            }
        ],
        'scriptId': '546a1844ff34c70456111185',
        'name': 'test',
        'output': {
            'itemId': '546a1844ff34c70456111185'
        }
    }
    json_body = json.dumps(body)
    r = self.request('/jobs', method='POST',
                     type='application/json', body=json_body, user=self._cumulus)
    # Bogus script id
    self.assertStatus(r, 400)
def test_create_job_name_check(self):
    """Creating a job whose input path equals the job name is rejected with a 400."""
    payload = json.dumps({
        'onComplete': {'cluster': 'terminate'},
        'input': [{'itemId': '546a1844ff34c70456111185', 'path': 'test'}],
        'commands': ['echo "test"'],
        'name': 'test',
        'output': [{'itemId': '546a1844ff34c70456111185'}]
    })
    r = self.request('/jobs', method='POST', type='application/json',
                     body=payload, user=self._cumulus)
    # input path and name can't be the same
    self.assertStatus(r, 400)
def test_get(self):
    """GET /jobs/<id> returns 404 for unknown ids and the full document for created jobs."""
    # Unknown (but well-formed) object id -> not found
    r = self.request(
        '/jobs/546a1844ff34c70456111185', method='GET', user=self._cumulus)
    self.assertStatus(r, 404)
    body = {
        'onComplete': {
            'cluster': 'terminate'
        },
        'input': [
            {
                'itemId': '546a1844ff34c70456111185',
                'path': ''
            }
        ],
        'commands': [
            ''
        ],
        'name': 'test',
        'output': [{
            'itemId': '546a1844ff34c70456111185'
        }]
    }
    json_body = json.dumps(body)
    r = self.request('/jobs', method='POST',
                     type='application/json', body=json_body, user=self._user)
    self.assertStatus(r, 201)
    job_id = r.json['_id']
    r = self.request('/jobs/%s' %
                     str(job_id), method='GET', user=self._cumulus)
    self.assertStatusOk(r)
    expected_job = {u'status': u'created', u'userId': str(self._user['_id']), u'commands': [u''], u'name': u'test', u'onComplete': {u'cluster': u'terminate'}, u'output': [{
        u'itemId': u'546a1844ff34c70456111185'}], u'input': [{u'itemId': u'546a1844ff34c70456111185', u'path': u''}],
        '_id': str(job_id)}
    self.assertEqual(r.json, expected_job)
def test_update(self):
    """PATCH /jobs/<id> updates status (emitting an SSE notification) and metadata."""
    status_body = {
        'status': 'testing'
    }
    # Patching a non-existent job -> not found
    r = self.request(
        '/jobs/546a1844ff34c70456111185', method='PATCH',
        type='application/json', body=json.dumps(status_body),
        user=self._cumulus)
    self.assertStatus(r, 404)
    body = {
        'onComplete': {
            'cluster': 'terminate'
        },
        'input': [
            {
                'itemId': '546a1844ff34c70456111185',
                'path': ''
            }
        ],
        'commands': [
            ''
        ],
        'name': 'test',
        'output': [{
            'itemId': '546a1844ff34c70456111185'
        }]
    }
    json_body = json.dumps(body)
    r = self.request('/jobs', method='POST',
                     type='application/json', body=json_body, user=self._user)
    self.assertStatus(r, 201)
    job_id = r.json['_id']
    r = self.request('/jobs/%s' % str(job_id), method='PATCH',
                     type='application/json', body=json.dumps(status_body),
                     user=self._cumulus)
    self.assertStatusOk(r)
    expected_job = {u'status': u'testing', u'userId': str(self._user['_id']), u'commands': [u''], u'name': u'test', u'onComplete': {u'cluster': u'terminate'}, u'output': [{
        u'itemId': u'546a1844ff34c70456111185'}], u'input': [{u'itemId': u'546a1844ff34c70456111185', u'path': u''}],
        '_id': str(job_id)}
    self.assertEqual(r.json, expected_job)
    # Check we get the right server side events
    r = self.request('/notification/stream', method='GET', user=self._user,
                     isJson=False, params={'timeout': 0})
    self.assertStatusOk(r)
    notifications = self.getSseMessages(r)
    self.assertEqual(len(notifications), 2, 'Expecting two notifications')
    # The second notification corresponds to the status change above
    notification = notifications[1]
    notification_type = notification['type']
    data = notification['data']
    self.assertEqual(notification_type, 'job.status')
    expected = {
        u'status': u'testing',
        u'_id': job_id
    }
    self.assertEqual(data, expected, 'Unexpected notification data')
    body = {
        'metadata': {
            'my': 'data'
        }
    }
    # Test update metadata property
    r = self.request('/jobs/%s' % str(job_id), method='PATCH',
                     type='application/json', body=json.dumps(body),
                     user=self._cumulus)
    self.assertTrue('metadata' in r.json)
    self.assertEqual(r.json['metadata'], body['metadata'])
    # Update again: metadata is replaced wholesale with the new value
    body = {
        'metadata': {
            'my': 'data2',
            'new': 1
        }
    }
    r = self.request('/jobs/%s' % str(job_id), method='PATCH',
                     type='application/json', body=json.dumps(body),
                     user=self._cumulus)
    self.assertTrue('metadata' in r.json)
    self.assertEqual(r.json['metadata'], body['metadata'])
def test_log(self):
    """POST /jobs/<id>/log appends entries; GET returns them, honoring the offset param."""
    body = {
        'onComplete': {
            'cluster': 'terminate'
        },
        'input': [
            {
                'itemId': '546a1844ff34c70456111185',
                'path': ''
            }
        ],
        'commands': [
            ''
        ],
        'name': 'test',
        'output': [{
            'itemId': '546a1844ff34c70456111185'
        }]
    }
    json_body = json.dumps(body)
    r = self.request('/jobs', method='POST',
                     type='application/json', body=json_body, user=self._user)
    self.assertStatus(r, 201)
    job_id = r.json['_id']
    log_entry = {
        'msg': 'Some message'
    }
    # Fetching the log of a non-existent job -> not found
    r = self.request('/jobs/546a1844ff34c70456111185/log', method='GET',
                     user=self._user)
    self.assertStatus(r, 404)
    r = self.request('/jobs/%s/log' % str(job_id), method='POST',
                     type='application/json', body=json.dumps(log_entry), user=self._user)
    self.assertStatusOk(r)
    r = self.request('/jobs/%s/log' % str(job_id), method='GET',
                     user=self._user)
    self.assertStatusOk(r)
    expected_log = {u'log': [{u'msg': u'Some message'}]}
    self.assertEqual(r.json, expected_log)
    # A second post appends rather than replaces
    r = self.request('/jobs/%s/log' % str(job_id), method='POST',
                     type='application/json', body=json.dumps(log_entry), user=self._user)
    self.assertStatusOk(r)
    r = self.request('/jobs/%s/log' % str(job_id), method='GET',
                     user=self._user)
    self.assertStatusOk(r)
    self.assertEqual(len(r.json['log']), 2)
    # offset=1 skips the first entry
    r = self.request('/jobs/%s/log' % str(job_id), method='GET',
                     params={'offset': 1}, user=self._user)
    self.assertStatusOk(r)
    self.assertEqual(len(r.json['log']), 1)
def test_get_status(self):
    """A newly created job reports status 'created' via GET /jobs/<id>/status."""
    job_body = json.dumps({
        'onComplete': {'cluster': 'terminate'},
        'input': [{'itemId': '546a1844ff34c70456111185', 'path': ''}],
        'commands': [''],
        'name': 'test',
        'output': [{'itemId': '546a1844ff34c70456111185'}]
    })
    created = self.request('/jobs', method='POST', type='application/json',
                           body=job_body, user=self._user)
    self.assertStatus(created, 201)
    status_resp = self.request('/jobs/%s/status' % str(created.json['_id']),
                               method='GET', user=self._user)
    self.assertStatusOk(status_resp)
    self.assertEqual(status_resp.json, {u'status': u'created'})
def test_delete(self):
    """A non-running job can be deleted; a subsequent GET returns 404."""
    job_body = json.dumps({
        'onComplete': {'cluster': 'terminate'},
        'input': [{'itemId': '546a1844ff34c70456111185', 'path': ''}],
        'commands': [''],
        'name': 'test',
        'output': [{'itemId': '546a1844ff34c70456111185'}]
    })
    created = self.request('/jobs', method='POST', type='application/json',
                           body=job_body, user=self._user)
    self.assertStatus(created, 201)
    job_url = '/jobs/%s' % str(created.json['_id'])
    deleted = self.request(job_url, method='DELETE', user=self._cumulus)
    self.assertStatusOk(deleted)
    # The job must no longer be retrievable
    fetched = self.request(job_url, method='GET', user=self._cumulus)
    self.assertStatus(fetched, 404)
def test_list(self):
    """GET /jobs lists only the requesting user's jobs and honors limit/offset."""
    def create_job(user, name):
        # Helper: create one job owned by `user` and return its document
        body = {
            'onComplete': {
                'cluster': 'terminate'
            },
            'input': [
                {
                    'itemId': '546a1844ff34c70456111185',
                    'path': ''
                }
            ],
            'commands': [
                ''
            ],
            'name': name,
            'output': [{
                'itemId': '546a1844ff34c70456111185'
            }]
        }
        json_body = json.dumps(body)
        r = self.request('/jobs', method='POST',
                         type='application/json', body=json_body, user=user)
        self.assertStatus(r, 201)
        return r.json
    # One job for another owner, two for the user under test
    create_job(self._user, 'test0')
    job1 = create_job(self._another_user, 'test1')
    job2 = create_job(self._another_user, 'test2')
    r = self.request('/jobs', method='GET',
                     type='application/json', user=self._another_user)
    self.assertStatus(r, 200)
    # Only the requesting user's two jobs are visible
    self.assertEqual(len(r.json), 2)
    job_ids = [job['_id'] for job in r.json]
    self.assertTrue(job1['_id'] in job_ids)
    self.assertTrue(job2['_id'] in job_ids)
    # Now test limit
    params = {
        'limit': 1
    }
    r = self.request('/jobs', method='GET',
                     type='application/json', params=params,
                     user=self._another_user)
    self.assertStatus(r, 200)
    self.assertEqual(len(r.json), 1)
    self.assertEqual(r.json[0]['_id'], job1['_id'])
    # Now test offset
    params = {
        'offset': 1
    }
    r = self.request('/jobs', method='GET',
                     type='application/json', params=params,
                     user=self._another_user)
    self.assertStatus(r, 200)
    self.assertEqual(len(r.json), 1)
    self.assertEqual(r.json[0]['_id'], job2['_id'])
def test_delete_running(self):
body = {
'onComplete': {
'cluster': 'terminate'
},
'input': [
{
'itemId': '546a1844ff34c70456111185',
'path': ''
}
],
'commands': [
''
],
'name': 'test',
'output': [{
'itemId': '546a1844ff34c70456111185'
}]
}
json_body = json.dumps(body)
r = self.request('/jobs', method='POST',
type='application/json', body=json_body, user=self._user)
self.assertStatus(r, 201)
job_id = r.json['_id']
body = {
'status': 'running'
}
json_body = json.dumps(body)
r = self.request('/jobs/%s' %
str(job_id), method='PATCH',
type='application/json', body=json_body,
user=self._cumulus)
self.assertStatusOk(r)
r = self.request('/jobs/%s' %
str(job_id), method='DELETE', user=self._cumulus)
self.assertStatus(r, 400)
def test_job_sse(self):
body = {
'onComplete': {
'cluster': 'terminate'
},
'input': [
{
'itemId': '546a1844ff34c70456111185',
'path': ''
}
],
'commands': [
''
| |
# Repository: ytchang05/N-lang -- file: python/scope.py
import math
import os.path
import sys
import lark
from lark import Lark
from colorama import Fore, Style
from variable import Variable
from function import Function
from native_function import NativeFunction
from type import NType, NGenericType, NAliasType, NTypeVars, NModule, apply_generics, apply_generics_to, resolve_equal_types, NClass
from enums import EnumType, EnumValue, EnumPattern
from native_function import NativeFunction
from native_types import n_list_type, n_cmd_type
from ncmd import Cmd
from type_check_error import TypeCheckError, display_type
from display import display_value
from operation_types import binary_operation_types, unary_operation_types, comparable_types, iterable_types, legacy_iterable_types
from file import File
from imported_error import ImportedError
import native_functions
from syntax_error import format_error
from classes import NConstructor
from modules import libraries
# Directory containing this script (or the frozen executable), used to locate
# resources such as the Lark grammar file shipped alongside the interpreter.
basepath = ""
if getattr(sys, 'frozen', False):
    # Running as a frozen bundle (e.g. PyInstaller sets sys.frozen): resources
    # live next to the executable and __file__ may not be meaningful.
    basepath = os.path.dirname(sys.executable)
elif __file__:
    basepath = os.path.dirname(__file__)
# Full path to the N language grammar.
syntaxpath = os.path.join(basepath, "syntax.lark")
def parse_file(file_path, base_path):
    """Parse an N source file.

    :param file_path: path of the file to parse
    :param base_path: directory of the entry-point file, used to compute the
        file's display name
    :return: (import scope seeded with native functions, parse tree, File)

    NOTE(review): if parsing fails, format_error() is called and `tree` would
    be unbound at the return -- presumably format_error terminates the
    process; confirm.
    """
    import_scope = Scope(base_path=base_path, file_path=file_path)
    native_functions.add_funcs(import_scope)
    with open(syntaxpath, "r") as grammar_file:
        grammar = grammar_file.read()
    n_parser = Lark(grammar, start="start", propagate_positions=True)
    with open(file_path, "r") as source_file:
        file = File(source_file, name=os.path.relpath(file_path, start=base_path))
    try:
        tree = file.parse(n_parser)
    except (lark.exceptions.UnexpectedCharacters, lark.exceptions.UnexpectedEOF) as e:
        format_error(e, file)
    return import_scope, tree, file
async def eval_file(file_path, base_path):
    """Parse and evaluate a file, merging its globals into the import scope."""
    import_scope, tree, _ = parse_file(file_path, base_path)
    evaluated = await parse_tree(tree, import_scope)
    import_scope.variables = {**import_scope.variables, **evaluated.variables}
    return import_scope
def type_check_file(file_path, base_path):
    """Parse and type-check a file.

    :return: (import scope with the checked variables/types/errors merged in,
        the File wrapper for error display)
    """
    import_scope, tree, text_file = parse_file(file_path, base_path)
    checked = type_check(tree, import_scope)
    import_scope.variables = {**import_scope.variables, **checked.variables}
    import_scope.public_types = {**import_scope.public_types, **checked.public_types}
    import_scope.errors.extend(checked.errors)
    import_scope.warnings.extend(checked.warnings)
    return import_scope, text_file
def type_check(tree, import_scope):
    """Type-check a parse tree rooted at `start`.

    :return: a fresh child scope holding any errors/warnings found
    """
    scope = import_scope.new_scope(inherit_errors=False)
    if tree.data != "start":
        scope.errors.append(TypeCheckError(tree, "Internal issue: I cannot type check from a non-starting branch."))
        return scope
    for child in tree.children:
        scope.type_check_command(child)
    return scope
async def parse_tree(tree, import_scope):
    """Evaluate a parse tree rooted at `start`; returns the evaluation scope.

    :raises SyntaxError: if the tree is not a `start` node
    """
    if tree.data != "start":
        raise SyntaxError("Unable to run parse_tree on non-starting branch")
    scope = import_scope.new_scope(inherit_errors=False)
    for child in tree.children:
        await scope.eval_command(child)
    return scope
def get_destructure_pattern(tree):
    """Convert a destructuring syntax node into a (pattern, source-node) pair.

    The pattern is one of:
      - dict of field name -> (sub-pattern, src) for record patterns
      - tuple of (sub-pattern, src) for tuple patterns
      - list of (sub-pattern, src) for list patterns
      - EnumPattern for enum variant patterns
      - a variable name string, or None for the `_` wildcard
    The original lark node is kept alongside each pattern for error reporting.
    """
    if type(tree) == lark.Tree:
        if tree.data == "record_pattern":
            entries = []
            for pattern in tree.children:
                if type(pattern) == lark.Token:
                    # Shorthand `{ key }`: binds the field to a variable of the
                    # same name.
                    entries.append((pattern.value, (pattern.value, pattern)))
                else:
                    key, value = pattern.children
                    entries.append((key.value, get_destructure_pattern(value)))
            return (dict(entries), tree)
        elif tree.data == "tuple_pattern":
            return (tuple(get_destructure_pattern(pattern) for pattern in tree.children), tree)
        elif tree.data == "list_pattern":
            patterns = []
            for pattern in tree.children:
                patterns.append(get_destructure_pattern(pattern))
            return (patterns, tree)
        elif tree.data == "enum_pattern":
            # First child is the variant name, the rest are field patterns.
            enum_name, *pattern_trees = tree.children
            patterns = []
            for pattern in pattern_trees:
                patterns.append(get_destructure_pattern(pattern))
            return (EnumPattern(enum_name, patterns), tree)
    # Plain token: `_` means "ignore this value", anything else is a name.
    return (None if tree.value == "_" else tree.value, tree)
def pattern_to_name(pattern_and_src):
    """Return the variable name of a simple pattern, or a placeholder label
    for any destructuring (record/tuple/list/enum) pattern."""
    pattern = pattern_and_src[0]
    return pattern if isinstance(pattern, str) else "<destructuring pattern>"
def get_arguments(tree):
    """Return (generic type declarations, argument subtrees) for a function.

    The arguments syntax briefly had the WS token which had to be removed, so
    only lark.Tree children are considered.
    """
    subtrees = [child for child in tree.children if type(child) == lark.Tree]
    if subtrees and subtrees[0].data == "generic_declaration":
        generics, *arguments = subtrees
        return generics.children, arguments
    return [], subtrees
class Scope:
    def __init__(self, parent=None, parent_function=None, errors=None, warnings=None, base_path="", file_path=""):
        """Create a scope, optionally nested inside `parent`.

        :param parent: enclosing Scope; lookups fall back to it
        :param parent_function: the Function this scope is the body of, if any
        :param errors: shared error list (child scopes may alias the parent's)
        :param warnings: shared warning list, same sharing rule as errors
        :param base_path: directory of the entry-point file
        :param file_path: path of the file this scope belongs to
        """
        self.parent = parent
        self.parent_function = parent_function
        # Variables and types declared directly in this scope (not ancestors).
        self.variables = {}
        self.types = {}
        # Types this file exports to importers.
        self.public_types = {}
        self.errors = errors if errors is not None else []
        self.warnings = warnings if warnings is not None else []
        # The path of the directory containing the initial file. Used to
        # determine the relative path of a file to the starting file.
        self.base_path = base_path
        # The path of the file the Scope is associated with.
        self.file_path = file_path
def new_scope(self, parent_function=None, inherit_errors=True):
return Scope(
self,
parent_function=parent_function or self.parent_function,
errors=self.errors if inherit_errors else [],
warnings=self.warnings if inherit_errors else [],
base_path=self.base_path,
file_path=self.file_path,
)
def get_variable(self, name, err=True):
variable = self.variables.get(name)
if variable is None:
if self.parent:
return self.parent.get_variable(name, err=err)
elif err:
raise NameError("You tried to get a variable/function `%s`, but it isn't defined." % name)
else:
return variable
def get_type(self, name, err=True):
scope_type = self.types.get(name)
if scope_type is None:
if self.parent:
return self.parent.get_type(name, err=err)
elif err:
raise NameError("You tried to get a type `%s`, but it isn't defined." % name)
else:
return scope_type
def get_parent_function(self):
if self.parent_function is None:
if self.parent:
return self.parent.get_parent_function()
else:
return None
else:
return self.parent_function
    def get_module_type(self, module_type, err=True):
        """Resolve a possibly module-qualified type name (e.g. `mod.sub.T`).

        Walks the leading module path (if any) through variables of type
        NModule, then looks up the final type name either in that module's
        exported types or in this scope. Records a TypeCheckError and returns
        None on any failure; the sentinel "invalid" also resolves to None.
        """
        *modules, type_name = module_type.children
        if len(modules) > 0:
            # Qualified: the first segment must be a module-valued variable.
            current_module = self.get_variable(modules[0].value, err=err)
            if current_module is None:
                self.errors.append(TypeCheckError(modules[0], "I can't find `%s` from this scope." % modules[0].value))
                return None
            current_module = current_module.type
            if not isinstance(current_module, NModule):
                self.errors.append(TypeCheckError(modules[0], "%s is not a module." % modules[0].value))
                return None
            # Descend through nested sub-modules.
            for module in modules[1:]:
                current_module = current_module.get(module.value)
                if not isinstance(current_module, NModule):
                    self.errors.append(TypeCheckError(module, "%s is not a module." % module.value))
                    return None
            n_type = current_module.types.get(type_name.value)
            if n_type is None:
                self.errors.append(TypeCheckError(type_name, "The module doesn't export a type `%s`." % type_name.value))
                return None
        else:
            # Unqualified: plain lookup in this scope chain.
            n_type = self.get_type(type_name.value, err=err)
            if n_type is None:
                self.errors.append(TypeCheckError(module_type, "I don't know what type you're referring to by `%s`." % type_name.value))
                return None
        if n_type == "invalid":
            return None
        else:
            return n_type
    def parse_type(self, tree_or_token, err=True):
        """Resolve a type-annotation syntax node to an internal type value.

        Handles generic applications (`with_typevars`), tuple/record/function
        type syntax, plain (possibly module-qualified) names, and the unit
        token. Returns None (after recording a TypeCheckError) on invalid
        annotations; with err=True, unknown node kinds raise NameError.
        """
        if type(tree_or_token) == lark.Tree:
            if tree_or_token.data == "with_typevars":
                # A named type applied to type arguments, e.g. `list[str]`.
                module_type, *typevars = tree_or_token.children
                typevar_type = self.get_module_type(module_type, err=err)
                parsed_typevars = [self.parse_type(typevar, err=err) for typevar in typevars]
                if typevar_type is None:
                    return None
                elif isinstance(typevar_type, NAliasType) or isinstance(typevar_type, NTypeVars):
                    # Duck typing :sunglasses:
                    if len(typevars) < len(typevar_type.typevars):
                        self.errors.append(TypeCheckError(tree_or_token, "%s expects %d type variable(s)." % (display_type(typevar_type), len(typevar_type.typevars))))
                        return None
                    elif len(typevars) > len(typevar_type.typevars):
                        self.errors.append(TypeCheckError(tree_or_token, "%s only expects %d type variable(s)." % (display_type(typevar_type), len(typevar_type.typevars))))
                        return None
                    return typevar_type.with_typevars(parsed_typevars) if None not in parsed_typevars else None
                else:
                    self.errors.append(TypeCheckError(tree_or_token, "%s doesn't take any type variables." % display_type(typevar_type)))
                    return None
            elif tree_or_token.data == "tupledef":
                # Tuple types are represented as plain lists of member types.
                tuple_type = [self.parse_type(child, err=err) for child in tree_or_token.children]
                return tuple_type if None not in tuple_type else None
            elif tree_or_token.data == "recorddef":
                # Record types are dicts mapping field name -> field type.
                record_type = {entry.children[0].value: self.parse_type(entry.children[1], err=err) for entry in tree_or_token.children}
                return record_type if None not in record_type.values() else None
            elif tree_or_token.data == "module_type":
                n_type = self.get_module_type(tree_or_token, err=err)
                if n_type is None:
                    return None
                elif (isinstance(n_type, NAliasType) or isinstance(n_type, NTypeVars)) and len(n_type.typevars) > 0:
                    # A generic type used bare, without its type arguments.
                    self.errors.append(TypeCheckError(tree_or_token, "%s expects %d type variables." % (display_type(n_type), len(n_type.typevars))))
                    return None
                elif isinstance(n_type, NAliasType):
                    return n_type.with_typevars()
                return n_type
            elif tree_or_token.data == "func_type":
                func_types = tree_or_token.children
                if type(func_types[0]) == lark.Tree and func_types[0].data == "generic_declaration":
                    # Generic function type: register each generic in a child
                    # scope so the argument/return annotations can refer to it.
                    generics, *func_types = func_types
                    scope = self.new_scope()
                    for generic in generics.children:
                        if generic.value in scope.types:
                            self.errors.append(TypeCheckError(generic, "You already defined a generic type with this name."))
                        scope.types[generic.value] = NGenericType(generic.value)
                else:
                    scope = self
                # Function types are tuples of (arg types..., return type).
                func_type = tuple(scope.parse_type(child, err=err) for child in func_types)
                if None in func_type:
                    return None
                # If the function type returns a function, flatten the entire
                # thing
                if isinstance(func_type[-1], tuple):
                    func_type = tuple([*func_type[0:-1], *func_type[-1]])
                return func_type
            elif err:
                raise NameError("Type annotation of type %s; I am not ready for this." % tree_or_token.data)
            else:
                self.errors.append(TypeCheckError(tree_or_token, "Internal problem: encountered a type annotation type %s." % tree_or_token.data))
                return None
        elif tree_or_token.type == "UNIT":
            return "unit"
        elif err:
            # NOTE(review): this formats tree_or_token.data, but lark Tokens
            # have .type rather than .data -- presumably unreachable; confirm.
            raise NameError("Type annotation token of type %s; I am not ready for this." % tree_or_token.data)
        else:
            self.errors.append(TypeCheckError(tree_or_token, "Internal problem: encountered a type annotation token type %s." % tree_or_token.data))
            return None
def get_name_type(self, name_type, err=True, get_type=True):
pattern = get_destructure_pattern(name_type.children[0])
if len(name_type.children) == 1:
# No type annotation given, so it's implied
return pattern, 'infer'
else:
return pattern, self.parse_type(name_type.children[1], err) if get_type else 'whatever'
"""
Sets variables from a pattern given a value or a type and returns whether
the entire pattern matched.
This is used by both type-checking (with warn=True) and interpreting
(warn=False). During type-checking, `value_or_type` is the type (notably,
tuples are lists), so it must determine whether it's even reasonable to
destructure the type (for example, it doesn't make sense to destructure a
record as a list), and error accordingly. During interpreting,
`value_or_type` is the actual value, and thanks to the type-checker, the
value should be guaranteed to fit the pattern.
- warn=True - Is the pattern valid?
- warn=False - Does the pattern match?
Note that this sets variables while checking the pattern, so it's possible
that variables are assigned even if the entire pattern doesn't match.
Fortunately, this is only used in cases where the conditional let would
create a new scope (such as in an if statement), so the extra variables can
be discarded if the pattern ends up not matching.
NOTE: This must return True if warn=True. (In other words, don't short
circuit if a pattern fails to match.)
"""
def assign_to_pattern(self, pattern_and_src, value_or_type, warn=False, path=None, public=False, certain=False):
path_name = path or "the value"
pattern, src = pattern_and_src
if isinstance(pattern, dict):
is_dict = isinstance(value_or_type, dict)
if is_dict:
# Should this be an error? Warning?
unused_keys = [key for key in value_or_type.keys() if key not in pattern]
if len(unused_keys) > 0:
self.errors.append(TypeCheckError(src, "%s (%s) has field(s) %s, but you haven't destructured them. (Hint: use `_` to denote unused fields.)" % (display_type(value_or_type), path_name, ", ".join(unused_keys))))
else:
if warn:
if value_or_type is not None:
self.errors.append(TypeCheckError(src, "I can't destructure %s as a record because %s is not a record." % (path_name, display_type(value_or_type))))
else:
raise TypeError("Destructuring non-record as record.")
for key, (sub_pattern, parse_src) in pattern.items():
value = value_or_type.get(key) if is_dict else None
if is_dict and value is None:
if warn:
self.errors.append(TypeCheckError(parse_src, "I can't get the field %s from %s because %s doesn't have that field." % (key, path_name, display_type(value_or_type))))
else:
raise TypeError("Given record doesn't have a key %s." % key)
valid = self.assign_to_pattern((sub_pattern, parse_src), value, warn, "%s.%s" % (path or "<record>", key), public, certain=certain)
if not valid:
return False
elif isinstance(pattern, tuple):
# I believe the interpreter uses actual Python tuples, while the
# type checker uses lists for tuple types. We should fix that for
# the type checker.
is_tuple = isinstance(value_or_type, list) if warn else isinstance(value_or_type, tuple)
if not is_tuple:
if warn:
if value_or_type is not None:
self.errors.append(TypeCheckError(src, "I can't destructure %s as a tuple because %s is not a tuple." % (path_name, display_type(value_or_type))))
else:
raise TypeError("Destructuring non-record as record.")
if is_tuple and len(pattern) != len(value_or_type):
if warn:
if len(pattern) > len(value_or_type):
_, parse_src = pattern[len(value_or_type)]
self.errors.append(TypeCheckError(parse_src, "I can't destructure %d items from a %s." % (len(pattern), display_type(value_or_type))))
else:
self.errors.append(TypeCheckError(src, "I can't destructure only %d items from a %s. (Hint: use `_` to denote unused members of a destructured tuple.)" % (len(pattern), display_type(value_or_type))))
else:
raise TypeError("Number of destructured values from tuple doesn't match tuple length.")
for i, (sub_pattern, parse_src) in enumerate(pattern):
value = value_or_type[i] if is_tuple and i < len(value_or_type) | |
import logging
import numpy as np
import argparse
import json
from pytorch_pretrained_bert.tokenization import BertTokenizer
import torch
from tap2.bottom_machine.hypers import DataOptions, SentMarkerStyle
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(filename)s:%(lineno)d - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
class Example:
    """
    A single training/test example for the supporting fact prediction.
    The key concepts here are 'chunks' these are chunks of retrieved passages.
    If we have short passages, we put them together to create a chunk.
    If we have long passages, we split them into multiple chunks.
    A single question may also be split into multiple examples, since our max_seq_length and max_chunks may not permit
    all passages plus the question to be in a single example together.
    """
    def __init__(self,
                 id,
                 chunk_tokens,
                 question_len,
                 sent_starts,
                 sent_ends,
                 sent_ids,
                 supporting_facts):
        """
        :param id: question id
        :param chunk_tokens: list of token lists, each beginning with the
            question prefix ([CLS] question [SEP]) followed by passage tokens
        :param question_len: length of that question prefix in tokens
        :param sent_starts: per-sentence start index into the concatenated
            passage tokens (question prefixes excluded)
        :param sent_ends: per-sentence (inclusive) end index, same indexing
        :param sent_ids: sentence ids aligned with sent_starts/sent_ends
        :param supporting_facts: sentence ids that should be selected, or None
            for unlabeled data
        """
        self.id = id
        self.chunk_tokens = chunk_tokens  # chunks x seq_length
        # Passage-only token count per chunk (question prefix excluded).
        self.chunk_lengths = [(len(ct)-question_len) for ct in chunk_tokens]
        self.question_len = question_len  # scalar, each chunk has a question this long prepended
        self.sent_starts = sent_starts  # num_sents
        self.sent_ends = sent_ends
        self.sent_ids = sent_ids
        self.supporting_facts = supporting_facts  # sent_ids that should be selected
        # after taking cat([ct[question_len:] for ct in chunk_tokens])
        # sent_starts and sent_ends contain the start and end token for each sentence
        # Segment ids: 0 over the question prefix, 1 over the passage tokens.
        self.segment_ids = [[0]*question_len + [1]*(len(ct)-question_len) for ct in chunk_tokens]
        assert len(self.chunk_tokens) == len(self.chunk_lengths)
    def display(self):
        """Log a human-readable dump: tokens with segment ids, then each
        sentence span and the supporting fact ids."""
        logger.info(f'{self.id}')
        for ct, sids in zip(self.chunk_tokens, self.segment_ids):
            logger.info(f'{str(list(zip(ct, sids)))}')
        # Concatenate the passage portions of all chunks to index sentences.
        allt = [t for ct in self.chunk_tokens for t in ct[self.question_len:]]
        for si in range(len(self.sent_ids)):
            logger.info(f'{self.sent_ids[si]} = {str(allt[self.sent_starts[si]:self.sent_ends[si]+1])}')
        logger.info(f'supporting facts = {str(self.supporting_facts)}')
class ExampleBuilder:
    """
    builds Example instances from a sequence of sub-passages

    Sub-passages are accumulated into chunks of at most max_seq_length tokens
    (each chunk is prefixed with [CLS] question [SEP]); when max_chunks chunks
    fill up, an Example is emitted and accumulation restarts.
    """
    def __init__(self, id, qtoks, supporting_facts, max_seq_length, max_chunks, sent_marker_style: SentMarkerStyle):
        self.id = id  # the question id
        self.qtoks = qtoks  # the question tokens
        self.supporting_facts = supporting_facts  # ids of the sentences that are supporting facts
        self.qlen = len(qtoks) + 2  # length of the question plus [CLS] and [SEP]
        self.max_seq_length = max_seq_length
        self.max_chunks = max_chunks
        self.sent_marker_style = sent_marker_style
        self.examples = []  # current set of examples constructed
        self.reset()
    def reset(self):
        # Clear all per-example accumulation state (called after each
        # add_example()).
        self.global_sent_start_indices = []
        self.global_sent_end_indices = []
        self.sum_passage_len = 0  # total length of the chunks, excluding the qlen
        self.all_sent_ids = []
        self.chunk_tokens = []
        self.cur_chunk = None
    def add_example(self):
        """Flush accumulated chunks into an Example (if any) and reset."""
        if self.cur_chunk is not None:
            # add current chunk to the example if we have one
            self.cur_chunk.append('[SEP]')
            self.chunk_tokens.append(self.cur_chunk)
            self.cur_chunk = None
        assert len(self.chunk_tokens) <= self.max_chunks
        if len(self.chunk_tokens) > 0:
            ex = Example(self.id, self.chunk_tokens, self.qlen,
                         self.global_sent_start_indices, self.global_sent_end_indices, self.all_sent_ids,
                         self.supporting_facts)
            self.examples.append(ex)
        self.reset()
    def add_sub_passage(self, sp):
        """Append one sub-passage (list of (sentence id, token list)) to the
        current chunk, starting new chunks/examples when limits are reached.

        :raises ValueError: if the sub-passage cannot fit in a single chunk
        """
        passage = []
        sent_start_indices = []
        sent_end_indices = []
        sent_ids = []
        for sid, sent in sp:
            # if using sent_markers only as markers we put len(passage)+1 and len(passage)-2
            offset = 1 if self.sent_marker_style == SentMarkerStyle.mark else 0
            sent_start_indices.append(len(passage) + offset)
            passage.extend(sent)
            sent_end_indices.append(len(passage) - 1 - offset)
            sent_ids.append(sid)
        # now we've got the passage in our local vars
        if self.cur_chunk is not None and len(self.cur_chunk) + len(passage) >= self.max_seq_length:
            # finish current chunk if needed
            self.cur_chunk.append('[SEP]')
            self.sum_passage_len += 1
            self.chunk_tokens.append(self.cur_chunk)
            self.cur_chunk = None
            if len(self.chunk_tokens) >= self.max_chunks:
                # finish current example if needed
                self.add_example()
        if self.cur_chunk is None:
            # initialize current chunk if needed
            self.cur_chunk = []
            self.cur_chunk.append('[CLS]')
            self.cur_chunk.extend(self.qtoks)
            self.cur_chunk.append('[SEP]')
        if self.qlen + len(passage) > self.max_seq_length:
            raise ValueError
        # add to current example
        self.cur_chunk.extend(passage)
        self.global_sent_start_indices.extend([i + self.sum_passage_len for i in sent_start_indices])
        self.global_sent_end_indices.extend([i + self.sum_passage_len for i in sent_end_indices])
        self.all_sent_ids.extend(sent_ids)
        self.sum_passage_len += len(passage)
def to_passages(contexts, tokenizer, max_sent_len, sent_marker_style: SentMarkerStyle):
    """
    tokenize the sentences in the json example
    :param contexts: list of pairs first is passage id, second is list of sentences
    :param tokenizer: BERT-style tokenizer
    :param max_sent_len: maximum tokens per sentence, including any markers
    :param sent_marker_style: whether sentences get [STARTSENT]/[ENDSENT] markers
    :return: list of passages, each a list of (sentence id, token list)
    """
    # Reserve two tokens per sentence for the markers, when they are used.
    if sent_marker_style == SentMarkerStyle.no:
        budget = max_sent_len
    else:
        budget = max_sent_len - 2
    passages = []
    for context in contexts:
        pid = context[0]
        tokenized = []
        for ndx, sent in enumerate(context[1]):
            if not sent.strip():  # this is quite common, lots of empty sentences
                continue
            sid = pid + ":" + str(ndx)
            stoks = tokenizer.tokenize(sent)
            if not stoks:
                logger.warning(f'No tokens for {sid}:"{sent}"')
                continue
            if len(stoks) > budget:
                stoks = stoks[0:budget]
            if sent_marker_style != SentMarkerStyle.no:
                stoks = ['[STARTSENT]'] + stoks + ['[ENDSENT]']
            assert 0 < len(stoks) <= max_sent_len
            tokenized.append((sid, stoks))
        passages.append(tokenized)
    return passages
def to_sub_passages(passages, qlen, max_seq_len):
    """
    split passages that are too long into multiple passages

    Passages are sorted in place by total token count (shortest first). When
    a passage is split, the previous sentence is repeated at the start of the
    next sub-passage for context overlap.
    :param passages: list of passages, each a list of (sentence id, token list)
    :param qlen: question length (tokens) prepended to every chunk
    :param max_seq_len: maximum sequence length of a chunk
    :return: flat list of sub-passages
    """
    passages.sort(key=lambda p: sum(len(toks) for _, toks in p))
    result = []
    for passage in passages:
        current = []
        current_len = 0
        for idx, sent in enumerate(passage):
            tok_count = len(sent[1])
            if current_len + tok_count + qlen >= max_seq_len:
                result.append(current)
                # No single sentence may overflow on its own - ensured by max_sent_len.
                assert idx > 0
                # Start the next sub-passage with the previous sentence repeated.
                prev = passage[idx - 1]
                current = [prev]
                current_len = len(prev[1])
            current.append(sent)
            current_len += tok_count
        result.append(current)
    return result
def truncate_passages(passages, qlen, max_seq_len):
    """Drop trailing sentences from any passage that, together with the
    question, would exceed max_seq_len.

    Passages are sorted in place by total token count (shortest first).
    :param passages: list of passages, each a list of (sentence id, token list)
    :param qlen: question length (tokens) prepended to every chunk
    :param max_seq_len: maximum sequence length of a chunk
    :return: list of (possibly truncated) passages
    """
    passages.sort(key=lambda p: sum(len(toks) for _, toks in p))
    truncated = []
    for passage in passages:
        used = 0
        kept = passage
        for idx, sent in enumerate(passage):
            # First sentence that overflows: keep only what came before it.
            if used + len(sent[1]) + qlen >= max_seq_len:
                kept = passage[0:idx]
                break
            used += len(sent[1])
        truncated.append(kept)
    return truncated
def example_to_features(jex, tokenizer, opts: DataOptions):
    """
    split the original json example into the tensor precursors for the supporting facts model
    :param jex: json example (HotpotQA format)
    :param tokenizer: BERT-style tokenizer
    :param opts: data options controlling lengths, chunking and sentence markers
    :return: list of Example objects (a question may split into several)
    """
    question_id = jex['_id']  # a missing id is actually an error
    qtoks = tokenizer.tokenize(jex['question'])
    if len(qtoks) > opts.max_question_len:
        qtoks = qtoks[0:opts.max_question_len]
    supporting_facts = None
    if 'supporting_facts' in jex:
        supporting_facts = [sp[0]+':'+str(sp[1]) for sp in jex['supporting_facts']]
    qlen = len(qtoks) + 2  # [CLS] + question + [SEP]
    # Each sentence may use at most half the space left after the question.
    passages = to_passages(jex['context'], tokenizer, (opts.max_seq_len-1-qlen)//2, opts.sent_marker_style)
    if opts.truncate_passages:
        sub_passages = truncate_passages(passages, qlen, opts.max_seq_len)
    else:
        sub_passages = to_sub_passages(passages, qlen, opts.max_seq_len)
    builder = ExampleBuilder(question_id, qtoks, supporting_facts,
                             opts.max_seq_len, opts.num_para_chunks, opts.sent_marker_style)
    for sp in sub_passages:
        builder.add_sub_passage(sp)
    builder.add_example()  # flush the final example if available
    return builder.examples
def get_features(filename, tokenizer: BertTokenizer, opts: DataOptions):
    """Yield Example features for every question in a json dataset file.

    :param filename: path to the HotpotQA-style json file
    :param tokenizer: BERT tokenizer used for all tokenization
    :param opts: data options; when opts.split_questions is False, only the
        first example of a question split across multiple examples is kept
    """
    with open(filename, 'r') as fp:
        jdata = json.load(fp)
    split_question_count = 0
    for jex in jdata:
        list_of_features = example_to_features(jex, tokenizer, opts)
        if len(list_of_features) > 1:
            split_question_count += 1
        if not opts.split_questions:
            list_of_features = list_of_features[0:1]
        for features in list_of_features:
            yield features
    # FIX: the log message printed the literal "(unknown)" instead of the
    # actual dataset file name, which is available as `filename`.
    logger.info(f'number of split questions in {filename} = {split_question_count}')
def get_data(filename, tokenizer: BertTokenizer, opts: DataOptions, limit=0):
    """Load a json dataset file into tensor precursors for training/eval.

    :param filename: path to the HotpotQA-style json file
    :param tokenizer: BERT tokenizer (tokens -> vocabulary ids)
    :param opts: data options
    :param limit: stop after this many dataset items (0 = no limit)
    :return: (dataset, qid2supportingfacts); each dataset item is a tuple of
        (id, sent_ids, question_len, chunk_lengths, chunk_token_ids tensor,
        segment_ids tensor, sent_starts tensor, sent_ends tensor, sent_targets)
    """
    dataset = []
    max_chunks = 0
    max_sent_len = 0
    qid2supportingfacts = dict()
    qid2sentids = dict()
    for features in get_features(filename, tokenizer, opts):
        # convert to torch tensors, padding every chunk to the longest chunk
        slen = max([len(ct) for ct in features.chunk_tokens])
        chunk_token_ids = [tokenizer.convert_tokens_to_ids(ct) + [0]*(slen-len(ct)) for ct in features.chunk_tokens]
        segment_ids = [sids + [0]*(slen-len(sids)) for sids in features.segment_ids]
        if len(features.chunk_tokens) > max_chunks:
            max_chunks = len(features.chunk_tokens)
        max_sent_len = max(max_sent_len, (np.array(features.sent_ends)-np.array(features.sent_starts)).max())
        sent_targets = None
        if features.supporting_facts is not None:
            qid2supportingfacts[features.id] = features.supporting_facts
            for sid in features.sent_ids:
                qid2sentids.setdefault(features.id, set()).add(sid)
            # Binary target per sentence: 1 iff it is a supporting fact.
            sent_targets = torch.zeros(len(features.sent_ids), dtype=torch.float)
            for sf in features.supporting_facts:
                if sf not in features.sent_ids:
                    # supporting fact was truncated away; counted below
                    continue
                sent_targets[features.sent_ids.index(sf)] = 1
        assert len(features.sent_starts) == len(features.sent_ends) == len(features.sent_ids)
        assert len(chunk_token_ids) == len(features.chunk_lengths)
        dataset.append((features.id, features.sent_ids, features.question_len, features.chunk_lengths,
                        torch.tensor(chunk_token_ids, dtype=torch.long),
                        torch.tensor(segment_ids, dtype=torch.long),
                        torch.tensor(features.sent_starts, dtype=torch.long),
                        torch.tensor(features.sent_ends, dtype=torch.long),
                        sent_targets))
        if 0 < limit <= len(dataset):
            break
        if len(dataset) % 5000 == 0:
            # FIX: log messages printed the literal "(unknown)" rather than
            # the file name available as `filename` (three sites below too).
            logger.info(f'loading dataset item {len(dataset)} from {filename}')
    logger.info(f'in {filename}: max_chunks = {max_chunks}, max_sent_length = {max_sent_len}')
    # Count supporting facts lost to truncation (unrecallable positives).
    out_of_recall = 0
    total_positives = 0
    for qid, sps in qid2supportingfacts.items():  # renamed from `id` (shadowed builtin)
        total_positives += len(sps)
        # FIX: default to an empty set -- a question whose sentences were all
        # truncated away has no qid2sentids entry, and `sp not in None` would
        # raise TypeError.
        sent_ids = qid2sentids.get(qid, set())
        for sp in sps:
            if sp not in sent_ids:
                out_of_recall += 1
    if len(qid2supportingfacts) > 0:
        logger.info(f'in {filename}, due to truncations we have lost {out_of_recall} out of {total_positives} positives')
    return dataset, qid2supportingfacts
def display_batch(batch, tokenizer: BertTokenizer):
    """Log one batch in human-readable form: per-chunk tokens with segment
    ids, then each sentence span with its target label."""
    (id, sent_ids, qlen, chunk_lengths, chunk_tokens,
     segment_ids, sent_starts, sent_ends, sent_targets) = batch
    assert chunk_tokens.shape[0] == len(chunk_lengths)
    # Move everything off the tensors for plain-python iteration.
    chunk_tokens = chunk_tokens.numpy()
    segment_ids = segment_ids.numpy()
    sent_starts = sent_starts.numpy()
    sent_ends = sent_ends.numpy()
    sent_targets = sent_targets.numpy()
    logger.info(f'{id}')
    all_toks = []
    for ci, clen in enumerate(chunk_lengths):
        chunk_toks = tokenizer.convert_ids_to_tokens(chunk_tokens[ci])
        # Only the passage part (after the question prefix) carries sentences.
        all_toks.extend(chunk_toks[qlen:qlen+clen])
        segments = segment_ids[ci]
        logger.info(f'{str(list(zip(chunk_toks, segments)))}')
    for si in range(len(sent_ids)):
        logger.info(f'{sent_targets[si]} {sent_ids[si]} = {str(all_toks[sent_starts[si]:sent_ends[si]+1])}')
def main():
parser = argparse.ArgumentParser()
# Other parameters
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="BERT model to use")
parser.add_argument("--data", default=None, type=str, required=True,
help="HotpotQA | |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from pytorch3d import _C
from pytorch3d.structures import Meshes, Pointclouds
from torch.autograd import Function
from torch.autograd.function import once_differentiable
"""
This file defines distances between meshes and pointclouds.
The functions make use of the definition of a distance between a point and
an edge segment or the distance of a point and a triangle (face).
The exact mathematical formulations and implementations of these
distances can be found in `csrc/utils/geometry_utils.cuh`.
"""
# PointFaceDistance
class _PointFaceDistance(Function):
    """
    Torch autograd Function wrapper PointFaceDistance Cuda implementation
    """
    @staticmethod
    def forward(ctx, points, points_first_idx, tris, tris_first_idx, max_points):
        """
        Args:
            ctx: Context object used to calculate gradients.
            points: FloatTensor of shape `(P, 3)`
            points_first_idx: LongTensor of shape `(N,)` indicating the first point
                index in each example in the batch
            tris: FloatTensor of shape `(T, 3, 3)` of triangular faces. The `t`-th
                triangular face is spanned by `(tris[t, 0], tris[t, 1], tris[t, 2])`
            tris_first_idx: LongTensor of shape `(N,)` indicating the first face
                index in each example in the batch
            max_points: Scalar equal to maximum number of points in the batch
        Returns:
            dists: FloatTensor of shape `(P,)`, where `dists[p]` is the squared
                euclidean distance of `p`-th point to the closest triangular face
                in the corresponding example in the batch.
                `dists[p]` is
                `d(points[p], tris[idxs[p], 0], tris[idxs[p], 1], tris[idxs[p], 2])`
                where `d(u, v0, v1, v2)` is the distance of point `u` from the triangular
                face `(v0, v1, v2)` and `idxs[p]` is the closest face index.

        Note: only `dists` is returned; the `idxs` computed by the kernel are
        stashed on `ctx` for the backward pass rather than returned.
        """
        dists, idxs = _C.point_face_dist_forward(
            points, points_first_idx, tris, tris_first_idx, max_points
        )
        # idxs are needed to route gradients to the matched faces in backward.
        ctx.save_for_backward(points, tris, idxs)
        return dists
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_dists):
        grad_dists = grad_dists.contiguous()
        points, tris, idxs = ctx.saved_tensors
        grad_points, grad_tris = _C.point_face_dist_backward(
            points, tris, idxs, grad_dists
        )
        # Gradients flow only to `points` and `tris`; the index/count inputs
        # are non-differentiable, hence the None placeholders.
        return grad_points, None, grad_tris, None, None
# pyre-fixme[16]: `_PointFaceDistance` has no attribute `apply`.
# Functional alias: point_face_distance(points, points_first_idx, tris,
# tris_first_idx, max_points) -> dists.
point_face_distance = _PointFaceDistance.apply
# FacePointDistance
class _FacePointDistance(Function):
    """
    Torch autograd Function wrapper FacePointDistance Cuda implementation
    """
    @staticmethod
    def forward(ctx, points, points_first_idx, tris, tris_first_idx, max_tris):
        """
        Args:
            ctx: Context object used to calculate gradients.
            points: FloatTensor of shape `(P, 3)`
            points_first_idx: LongTensor of shape `(N,)` indicating the first point
                index in each example in the batch
            tris: FloatTensor of shape `(T, 3, 3)` of triangular faces. The `t`-th
                triangular face is spanned by `(tris[t, 0], tris[t, 1], tris[t, 2])`
            tris_first_idx: LongTensor of shape `(N,)` indicating the first face
                index in each example in the batch
            max_tris: Scalar equal to maximum number of faces in the batch
        Returns:
            dists: FloatTensor of shape `(T,)`, where `dists[t]` is the squared
                euclidean distance of `t`-th triangular face to the closest point in the
                corresponding example in the batch.
                `dists[t] = d(points[idxs[t]], tris[t, 0], tris[t, 1], tris[t, 2])`,
                where `d(u, v0, v1, v2)` is the distance of point `u` from the triangular
                face `(v0, v1, v2)` and `idxs[t]` is the closest point index.

        Note: only `dists` is returned; the `idxs` computed by the kernel are
        stashed on `ctx` for the backward pass rather than returned.
        """
        dists, idxs = _C.face_point_dist_forward(
            points, points_first_idx, tris, tris_first_idx, max_tris
        )
        # idxs are needed to route gradients to the matched points in backward.
        ctx.save_for_backward(points, tris, idxs)
        return dists
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_dists):
        grad_dists = grad_dists.contiguous()
        points, tris, idxs = ctx.saved_tensors
        grad_points, grad_tris = _C.face_point_dist_backward(
            points, tris, idxs, grad_dists
        )
        # Only `points` and `tris` receive gradients; index/count inputs are
        # non-differentiable, hence the None placeholders.
        return grad_points, None, grad_tris, None, None
# pyre-fixme[16]: `_FacePointDistance` has no attribute `apply`.
# Functional alias: face_point_distance(points, points_first_idx, tris,
# tris_first_idx, max_tris) -> dists.
face_point_distance = _FacePointDistance.apply
# PointEdgeDistance
class _PointEdgeDistance(Function):
    """
    Torch autograd Function wrapper PointEdgeDistance Cuda implementation
    """
    @staticmethod
    def forward(ctx, points, points_first_idx, segms, segms_first_idx, max_points):
        """
        Args:
            ctx: Context object used to calculate gradients.
            points: FloatTensor of shape `(P, 3)`
            points_first_idx: LongTensor of shape `(N,)` indicating the first point
                index for each example in the mesh
            segms: FloatTensor of shape `(S, 2, 3)` of edge segments. The `s`-th
                edge segment is spanned by `(segms[s, 0], segms[s, 1])`
            segms_first_idx: LongTensor of shape `(N,)` indicating the first edge
                index for each example in the mesh
            max_points: Scalar equal to maximum number of points in the batch
        Returns:
            dists: FloatTensor of shape `(P,)`, where `dists[p]` is the squared
                euclidean distance of `p`-th point to the closest edge in the
                corresponding example in the batch.
                `dists[p] = d(points[p], segms[idxs[p], 0], segms[idxs[p], 1])`,
                where `d(u, v0, v1)` is the distance of point `u` from the edge
                segment spanned by `(v0, v1)` and `idxs[p]` is the closest edge index.

        Note: only `dists` is returned; the `idxs` computed by the kernel are
        stashed on `ctx` for the backward pass rather than returned.
        """
        dists, idxs = _C.point_edge_dist_forward(
            points, points_first_idx, segms, segms_first_idx, max_points
        )
        # idxs are needed to route gradients to the matched edges in backward.
        ctx.save_for_backward(points, segms, idxs)
        return dists
    @staticmethod
    @once_differentiable
    def backward(ctx, grad_dists):
        grad_dists = grad_dists.contiguous()
        points, segms, idxs = ctx.saved_tensors
        grad_points, grad_segms = _C.point_edge_dist_backward(
            points, segms, idxs, grad_dists
        )
        # Only `points` and `segms` receive gradients; index/count inputs are
        # non-differentiable, hence the None placeholders.
        return grad_points, None, grad_segms, None, None
# pyre-fixme[16]: `_PointEdgeDistance` has no attribute `apply`.
point_edge_distance = _PointEdgeDistance.apply
# EdgePointDistance
class _EdgePointDistance(Function):
    """
    Autograd wrapper around the CUDA/C++ edge-to-point distance kernels.

    Computes, for every edge segment, the squared euclidean distance to the
    nearest point of the corresponding example in the batch.
    """

    @staticmethod
    def forward(ctx, points, points_first_idx, segms, segms_first_idx, max_segms):
        """
        Args:
            ctx: Context object used to stash tensors for the backward pass.
            points: FloatTensor of shape `(P, 3)`.
            points_first_idx: LongTensor of shape `(N,)` with the packed index
                of the first point of each example in the batch.
            segms: FloatTensor of shape `(S, 2, 3)` of edge segments; segment
                `s` is spanned by `(segms[s, 0], segms[s, 1])`.
            segms_first_idx: LongTensor of shape `(N,)` with the packed index
                of the first edge of each example in the batch.
            max_segms: Scalar equal to the maximum number of edges in the
                batch.

        Returns:
            dists: FloatTensor of shape `(S,)` where `dists[s]` is the squared
                euclidean distance from segment `s` to the closest point of
                the corresponding example in the batch, i.e.
                `dists[s] = d(points[idxs[s]], segms[s, 0], segms[s, 1])`
                with `idxs` the per-segment nearest-point indices computed by
                the kernel.
        """
        args = (points, points_first_idx, segms, segms_first_idx, max_segms)
        dists, idxs = _C.edge_point_dist_forward(*args)
        ctx.save_for_backward(points, segms, idxs)
        return dists

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_dists):
        # Gradients flow only to the point and segment coordinates; the index
        # tensors and the scalar argument receive None.
        points, segms, idxs = ctx.saved_tensors
        grad_points, grad_segms = _C.edge_point_dist_backward(
            points, segms, idxs, grad_dists.contiguous()
        )
        return grad_points, None, grad_segms, None, None


# pyre-fixme[16]: `_EdgePointDistance` has no attribute `apply`.
edge_point_distance = _EdgePointDistance.apply
def point_mesh_edge_distance(meshes: Meshes, pcls: Pointclouds):
"""
Computes the distance between a pointcloud and a mesh within a batch.
Given a pair `(mesh, pcl)` in the batch, we define the distance to be the
sum of two distances, namely `point_edge(mesh, pcl) + edge_point(mesh, pcl)`
`point_edge(mesh, pcl)`: Computes the squared distance of each point p in pcl
to the closest edge segment in mesh and averages across all points in pcl
`edge_point(mesh, pcl)`: Computes the squared distance of each edge segment in mesh
to the closest point in pcl and averages across all edges in mesh.
The above distance functions are applied for all `(mesh, pcl)` pairs in the batch
and then averaged across the batch.
Args:
meshes: A Meshes data structure containing N meshes
pcls: A Pointclouds data structure containing N pointclouds
Returns:
loss: The `point_edge(mesh, pcl) + edge_point(mesh, pcl)` distance
between all `(mesh, pcl)` in a batch averaged across the batch.
"""
if len(meshes) != len(pcls):
raise ValueError("meshes and pointclouds must be equal sized batches")
N = len(meshes)
# packed representation for pointclouds
points = pcls.points_packed() # (P, 3)
points_first_idx = pcls.cloud_to_packed_first_idx()
max_points = pcls.num_points_per_cloud().max().item()
# packed representation for edges
verts_packed = meshes.verts_packed()
edges_packed = meshes.edges_packed()
segms = verts_packed[edges_packed] # (S, 2, 3)
segms_first_idx = meshes.mesh_to_edges_packed_first_idx()
max_segms = meshes.num_edges_per_mesh().max().item()
# point to edge distance: shape (P,)
point_to_edge = point_edge_distance(
points, points_first_idx, segms, segms_first_idx, max_points
)
# weight each example by the inverse of number of points in the example
point_to_cloud_idx = pcls.packed_to_cloud_idx() # (sum(P_i), )
num_points_per_cloud = pcls.num_points_per_cloud() # (N,)
weights_p = num_points_per_cloud.gather(0, point_to_cloud_idx)
weights_p = 1.0 / weights_p.float()
point_to_edge = point_to_edge * weights_p
point_dist = point_to_edge.sum() / N
# edge to point distance: shape (S,)
edge_to_point = edge_point_distance(
points, points_first_idx, segms, segms_first_idx, max_segms
)
# weight each example by the inverse of number of edges in the example
segm_to_mesh_idx = meshes.edges_packed_to_mesh_idx() # (sum(S_n),)
num_segms_per_mesh = meshes.num_edges_per_mesh() # (N,)
| |
"original bfid: %s: %s" % \
(src_file_record['status'][0],
src_file_record['status'][1])
return return_error, job, None, None, f0, \
is_migrating_multiple_copy
if f0:
break #found our original.
"""
#For trusted pnfs systems, there isn't a problem,
# but for untrusted we need to set the effective
# IDs to the owner of the file.
#
# If the source PNFS file has been deleted only do the
# chimera.File() instantiation; skip the euid/egid stuff to
# avoid tracebacks.
"""
# get all pnfs metadata - first the source file
if src_file_record['deleted'] == "no":
try:
# This version handles the seteuid() locking.
p1 = File(src_path)
except (KeyboardInterrupt, SystemExit):
raise sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]
except (OSError, IOError), msg:
return str(msg), job, None, None, f0, is_migrating_multiple_copy
except:
exc, msg = sys.exc_info()[:2]
return str(msg), job, None, None, f0, is_migrating_multiple_copy
else:
# What do we need an empty File class for?
p1 = chimera.File(src_path)
# get all pnfs metadata - second the destination file
try:
p2 = File(mig_path)
except (KeyboardInterrupt, SystemExit):
raise sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]
except (OSError, IOError), msg:
return str(msg), job, None, None, f0, is_migrating_multiple_copy
except:
exc, msg = sys.exc_info()[:2]
return str(msg), job, None, None, f0, is_migrating_multiple_copy
##################################################################
#Handle deleted files specially.
if src_file_record['deleted'] == YES:
# We can skip the checking that is performed in the rest of
# this function.
res = ''
return res, job, p1, p2, f0, is_migrating_multiple_copy
###################################################################
#If we happen to be migrating a multiple copy (which is only allowed
# with --force), then we must not modify layers 1 and 4.
is_migrating_multiple_copy = False
# check if the metadata are consistent
res = compare_metadata(p1, src_file_record, tag="VM1.s [p1==src_file_record]")
if debug_p:
debug_log('compare_metadata VM1.s res=%s src_file_record=%s' % (res,src_file_record,))
p_show(p1,"VM1.s")
# deal with already swapped metadata
if res == "bfid":
res = compare_metadata(p1, dst_file_record, tag="VM1.d [p1==dst_file_record]")
if debug_p:
debug_log('compare_metadata VM1.d res=%s dst_file_record=%s' % (res,dst_file_record,))
p_show(p1,"VM1.d")
if res == "bfid" and f0:
#Compare original, if applicable.
res = compare_metadata(p1, f0, tag="VM1 [p1==f0]")
if debug_p:
debug_log('compare_metadata VM1.f0 res=%s f0=%s' % (res,f0,))
p_show(p1,"VM1.f0")
if not res:
#Note: Don't update layers 1 and 4!
is_migrating_multiple_copy = True
else:
if not res:
#The metadata has already been swapped.
debug_log('compare_metadata is About to return OK res=%s job=%s p1=%s p2=%s ' \
% (res,job,p1,p2,))
return None, job, p1, p2, f0, is_migrating_multiple_copy
if res:
return_error = "[1] metadata %s %s are inconsistent on %s" \
% (src_bfid, src_path, res)
return return_error, job, p1, p2, f0, is_migrating_multiple_copy
debug_log('compare_metadata - checking p2')
if not p2.bfid and not p2.volume:
#The migration path has already been deleted. There is
# no file to compare with.
pass
elif dst_file_record['deleted'] == UNKNOWN:
#If the destination file has incomplete metadata skip the file
# check, because it has been observed that other attempts may
# have clobbered the migration path. This avoids a very cryptic
# error message.
#
# Note: This has only been observed when re-running failures from
# --make-failed-copies.
#
#[root@stkendm5a src]# ./migrate.py --status CDMS126809553600001
# (VOV014) src_bfid SDB (VP1291) dst_bfid SDB copied swapped
# CDMS126809553600001 N CDMS127764084200000 N y y
#
#MIGRATION
#
# (VOV014) src_bfid SDB (VON980) dst_bfid SDB copied swapped
# CDMS126809553600001 ON CDMS126809555000000 MUE
#
#MULTIPLE_COPY
#
# In the above example, trying to proceed with finishing the multiple
# copy CDMS126809555000000 was failing, because the failed multiple
# copy CDMS127764084200000 owns the temporary PNFS file. It may look
# like CDMS127764084200000 exists from migration, however checking
# the file_family of VP1291 reveals that it really is a failed
# second attempt at duplication.
#
# enstore info --gvol VP1291 | grep volume_family
# 'volume_family': 'minerva.minerva_copy_2.cpio_odc',
pass
else:
res = compare_metadata(p2, dst_file_record, tag="VM2 [p2==dst_file_record]")
# deal with already swapped file record
if res == "pnfsid":
res = compare_metadata(p2, dst_file_record, p1.pnfs_id, tag="VM2 [p2==dst_file_record+p1]")
elif res == "bfid" and f0:
res = compare_metadata(p2, f0, p1.pnfs_id, tag="VM2 [p2==f0,p1.pnfs_id]")
if res:
return_error = "[2] metadata %s %s are inconsistent on %s" \
% (dst_bfid, mig_path, res)
return return_error, job, p1, p2, f0, is_migrating_multiple_copy
# cross check
err_msg = ""
if src_file_record['size'] != dst_file_record['size']:
err_msg = "%s and %s have different size" % (src_bfid, dst_bfid)
elif src_file_record['complete_crc'] != dst_file_record['complete_crc']:
# src and dst seed can be different. We do not know crc seed, 0 or 1.
# Guess crc seed is 0 in the record and 1 in the other. Try to convert to seed 1 and compare.
#
# src crc seed 0 - convert to 1
# dst crc seed 1
src_seed_1_crc = checksum.convert_0_adler32_to_1_adler32(src_file_record['complete_crc'], src_file_record['size'])
if src_seed_1_crc != dst_file_record['complete_crc']:
# src crc seed 1
# dst crc seed 0 - convert to 1
dst_seed_1_crc = checksum.convert_0_adler32_to_1_adler32(dst_file_record['complete_crc'], dst_file_record['size'])
if dst_seed_1_crc != src_file_record['complete_crc']:
err_msg = "%s and %s have different crc" % (src_bfid, dst_bfid)
elif src_file_record['sanity_cookie'] != dst_file_record['sanity_cookie']:
err_msg = "%s and %s have different sanity_cookie" % (src_bfid, dst_bfid)
log(my_task, str(src_file_record['sanity_cookie']), str(dst_file_record['sanity_cookie']))
if err_msg:
if dst_file_record['deleted'] == YES and not is_swapped(src_bfid, fcc, db):
log(my_task,
"undoing migration of %s to %s do to error" % (src_bfid, dst_bfid))
log_uncopied(src_bfid, dst_bfid, fcc, db)
return err_msg, job, p1, p2, f0, is_migrating_multiple_copy
job = (src_file_record, src_volume_record, src_path,
dst_file_record, dst_volume_record, tmp_path, mig_path)
return None, job, p1, p2, f0, is_migrating_multiple_copy
##################################################################
# swap_metadata(...) helper functions
# Naming convention used to recognize package (container) files by file name:
# a package is expected to be called 'package-<...>.tar'.
PKG_PREFIX='package-'
PKG_SUFFIX='.tar'
def _move_package_file(src,volume,src_chimera_file):
    """
    move (rename) package file src=/pnfs_path/<old_volume>/<package> to /pnfs_path/<volume>/<package>
    @type src: str
    @param src: source package full path
    @type volume: str
    @param volume: destination volume name
    @type src_chimera_file: chimera.File
    @param src_chimera_file: chimera file class instance for the source file
    @rtype: (str,None) or (None,str)
    @return: tuple (None, new_name) if package file moved OK
    @return: tuple (err_msg, None) in case of error
    """
    #split package path to package file name, volume name and everything else
    n=src.rsplit('/',2)
    if len(n) != 3:
        return (("Can not split package file path %s into /pnfs_path/<VOLUME>/<package>" % (src,)), None)
    fname=n[2]
    # Only files matching the 'package-*.tar' naming convention are treated
    # as packages; anything else is left where it is.
    if not (fname.startswith(PKG_PREFIX) and fname.endswith(PKG_SUFFIX)):
        return (("Package file name %s does not look like '%s*%s', not moving package to <volume> dir in pnfs" \
                 % (fname,PKG_PREFIX,PKG_SUFFIX)), None)
    # change volume to new volume and move package file in pnfs
    # we do not check old path contains old volume to fix situation when package file was in the wrong place
    n[1] = volume
    dest = '%s/%s/%s' % (n[0],n[1],n[2],)
    new_dir = '%s/%s' % (n[0],n[1],)
    if debug:
        log("MOVE_PACKAGE", "DEBUG:: Package file:\t%s" % src)
        log("MOVE_PACKAGE", "DEBUG:: Split:\t\t", n )
        log("MOVE_PACKAGE", "DEBUG:: New:\t\t", dest )
        log("MOVE_PACKAGE", "DEBUG:: New dir:\t" , new_dir )
        log("MOVE_PACKAGE", "DEBUG:: Package name:\t" , fname )
    # Create the destination <volume> directory.  An already-existing
    # directory (EEXIST) is fine; any other failure aborts the move.
    try:
        os.makedirs(new_dir, 0755)
    except:
        exc = sys.exc_info()
        # any other error than package dir exists - return with error
        if not (exc[0] is exceptions.OSError and exc[1][0] == errno.EEXIST) :
            return (("Can not move package file in pnfs from %s to %s" % (src,dest,)), None)
    # Rename the package.  If the source is gone (ENOENT) the move may have
    # already happened on a previous run, so accept the existing destination
    # when its bfid and size match the source file's metadata.
    try:
        os.rename(src,dest)
    except:
        exc = sys.exc_info()
        # file has been moved on previous run?
        if exc[0] is exceptions.OSError and exc[1][0] == errno.ENOENT :
            # does the destination file exist?
            try:
                statinfo = file_utils.get_stat(dest)
            except:
                statinfo = None
            # is there a package in pnfs at destination path with right bfid and size?
            if statinfo is not None :
                cf = chimera.File(dest)
                if cf.bfid == src_chimera_file.bfid \
                   and cf.size == src_chimera_file.size:
                    # package has been moved in pnfs/chimera in previous run
                    return (None,dest)
        return (("Can not move package file in pnfs from %s to %s" % (src,dest,)), None)
    # change package file name in pnfs layers
    try:
        change_pkg_name(src,dest,volume)
    except:
        return (("Can not update package file pnfs layers pnfs when moving package from %s to %s" % (src,dest,)), None)
    return (None,dest)
def _new_package_name(src,volume,src_chimera_file):
"""
return package file name src=/pnfs_path/<old_volume>/<package> to /pnfs_path/<volume>/<package>
@type src: str
@param src: source package full path
@type volume: str
@param volume: destination volume name
@type chimera_file: chimera.File
@param chimera_file: chimera file class instance for the source file
@rtype: (str,None) or (None,str)
@return: new_name
"""
#split package path to package file name, volume name and everything else
n=src.rsplit('/',2)
if len(n) != 3:
return (("Can not split package file path %s into | |
{\n'
' id\n'
' userSettingId\n'
' userSetting {\n'
' id\n'
' userId\n'
' owner\n'
' createdAt\n'
' updatedAt\n'
' value\n'
' url\n'
' name\n'
' type\n'
' showWelcomeNext\n'
' showWelcomeSkip\n'
' showWelcomeRemind\n'
' welcomeData\n'
' }\n'
' labelID\n'
' label {\n'
' id\n'
' name\n'
' color\n'
' type\n'
' }\n'
' type\n'
' }\n'
' }\n',
'iblinkbyuser': '\n'
' query IbLinkByUser(\n'
' $userName: String\n'
' $sortDirection: ModelSortDirection\n'
' $filter: ModelInvestigationGuestUserLinkFilterInput\n'
' $limit: Int\n'
' $nextToken: String\n'
' ) {\n'
' IbLinkByUser(\n'
' userName: $userName\n'
' sortDirection: $sortDirection\n'
' filter: $filter\n'
' limit: $limit\n'
' nextToken: $nextToken\n'
' ) {\n'
' items {\n'
' id\n'
' userName\n'
' createdAt\n'
' updatedAt\n'
' investigationID\n'
' channelID\n'
' }\n'
' nextToken\n'
' }\n'
' }\n',
'listalertapikeys': '\n'
' query ListAlertApiKeys(\n'
' $filter: ModelAlertApiKeyFilterInput\n'
' $limit: Int\n'
' $nextToken: String\n'
' ) {\n'
' listAlertApiKeys(filter: $filter, limit: $limit, '
'nextToken: $nextToken) {\n'
' items {\n'
' id\n'
' alertsPbName\n'
' createdAt\n'
' updatedAt\n'
' author\n'
' }\n'
' nextToken\n'
' }\n'
' }\n',
'listallrunbookss': '\n'
' query ListAllRunbookss(\n'
' $filter: ModelAllRunbooksFilterInput\n'
' $limit: Int\n'
' $nextToken: String\n'
' ) {\n'
' listAllRunbookss(filter: $filter, limit: $limit, '
'nextToken: $nextToken) {\n'
' items {\n'
' id\n'
' name\n'
' description\n'
' content\n'
' author\n'
' commands\n'
' createdAt\n'
' updatedAt\n'
' RunBookConnectors\n'
' type\n'
' longSave\n'
' rbVars\n'
' scope\n'
' PeopleCanView\n'
' PeopleCanAccess\n'
' GroupsCanView\n'
' GroupsCanAccess\n'
' }\n'
' nextToken\n'
' }\n'
' }\n',
'listallworkflowss': '\n'
' query ListAllWorkflowss(\n'
' $filter: ModelAllWorkflowsFilterInput\n'
' $limit: Int\n'
' $nextToken: String\n'
' ) {\n'
' listAllWorkflowss(filter: $filter, limit: $limit, '
'nextToken: $nextToken) {\n'
' items {\n'
' id\n'
' createdAt\n'
' updatedAt\n'
' name\n'
' description\n'
' type\n'
' author\n'
' longSave\n'
' rbVars\n'
' }\n'
' nextToken\n'
' }\n'
' }\n',
'listbase64images': '\n'
' query ListBase64Images(\n'
' $filter: ModelBase64ImageFilterInput\n'
' $limit: Int\n'
' $nextToken: String\n'
' ) {\n'
' listBase64Images(filter: $filter, limit: $limit, '
'nextToken: $nextToken) {\n'
' items {\n'
' id\n'
' playbookID\n'
' base64String\n'
' }\n'
' nextToken\n'
' }\n'
' }\n',
'listchannellinks': '\n'
' query ListChannelLinks(\n'
' $filter: ModelChannelLinkFilterInput\n'
' $limit: Int\n'
' $nextToken: String\n'
' ) {\n'
' listChannelLinks(filter: $filter, limit: $limit, '
'nextToken: $nextToken) {\n'
' items {\n'
' id\n'
' contactId\n'
' chatId\n'
' unread\n'
' createdAt\n'
' updatedAt\n'
' }\n'
' nextToken\n'
' }\n'
' }\n',
'listchannels': '\n'
' query ListChannels(\n'
' $filter: ModelChannelFilterInput\n'
' $limit: Int\n'
' $nextToken: String\n'
' ) {\n'
' listChannels(filter: $filter, limit: $limit, nextToken: '
'$nextToken) {\n'
' items {\n'
' id\n'
' name\n'
' title\n'
' members\n'
' membersState\n'
' createdAt\n'
' updatedAt\n'
' memberLocation\n'
' }\n'
' nextToken\n'
' }\n'
' }\n',
'listchats': '\n'
' query ListChats(\n'
' $filter: ModelChatFilterInput\n'
' $limit: Int\n'
' $nextToken: String\n'
' ) {\n'
' listChats(filter: $filter, limit: $limit, nextToken: '
'$nextToken) {\n'
' items {\n'
' id\n'
' from\n'
' content\n'
' createdAt\n'
' }\n'
' nextToken\n'
' }\n'
' }\n',
'listcheatsheets': '\n'
' query ListCheatSheets(\n'
' $filter: ModelCheatSheetFilterInput\n'
' $limit: Int\n'
' $nextToken: String\n'
' ) {\n'
' listCheatSheets(filter: $filter, limit: $limit, '
'nextToken: $nextToken) {\n'
' items {\n'
' id\n'
' createdAt\n'
' updatedAt\n'
' title\n'
' author\n'
' content\n'
' type\n'
' }\n'
' nextToken\n'
' }\n'
' }\n',
'listchecklistinvlinks': '\n'
' query ListCheckListInvLinks(\n'
' $filter: ModelCheckListInvLinkFilterInput\n'
' $limit: Int\n'
' $nextToken: String\n'
' ) {\n'
' listCheckListInvLinks(\n'
' filter: $filter\n'
' limit: $limit\n'
' nextToken: $nextToken\n'
' ) {\n'
' items {\n'
' id\n'
' createdAt\n'
' updatedAt\n'
' investigationID\n'
' CheckListID\n'
' owner\n'
' }\n'
' nextToken\n'
' }\n'
' }\n',
'listchecklistitems': '\n'
' query ListCheckListItems(\n'
' $filter: ModelCheckListItemFilterInput\n'
' $limit: Int\n'
' $nextToken: String\n'
' ) {\n'
' listCheckListItems(filter: $filter, limit: $limit, '
'nextToken: $nextToken) {\n'
' items {\n'
' id\n'
' localID\n'
' createdAt\n'
' updatedAt\n'
' author\n'
' content\n'
' title\n'
' type\n'
' connection\n'
' MMNodeConnection\n'
' investigationID\n'
' assignee\n'
' associatedPlaybooks\n'
' associatedCommands\n'
' }\n'
' nextToken\n'
' }\n'
' }\n',
'listchecklists': '\n'
' query ListCheckLists(\n'
' $filter: ModelCheckListFilterInput\n'
' $limit: Int\n'
' $nextToken: String\n'
' ) {\n'
' listCheckLists(filter: $filter, limit: $limit, '
'nextToken: $nextToken) {\n'
' items {\n'
' id\n'
' createdAt\n'
' updatedAt\n'
' name\n'
' description\n'
' type\n'
' content\n'
' author\n'
' }\n'
' nextToken\n'
' }\n'
' }\n',
'listclicks': '\n'
' query ListClicks(\n'
' $filter: ModelClickFilterInput\n'
' $limit: Int\n'
' $nextToken: String\n'
' ) {\n'
' listClicks(filter: $filter, limit: $limit, nextToken: '
'$nextToken) {\n'
' items {\n'
' id\n'
' count\n'
' type\n'
' featureName\n'
' createdAt\n'
' updatedAt\n'
' readAt\n'
' }\n'
' nextToken\n'
' }\n'
' }\n',
'listclicllinks': '\n'
' query ListCliclLinks(\n'
' $filter: ModelCLICLLinkFilterInput\n'
' $limit: Int\n'
' $nextToken: String\n'
' ) {\n'
' listCLICLLinks(filter: $filter, limit: $limit, '
'nextToken: $nextToken) {\n'
' items {\n'
' id\n'
' createdAt\n'
' updatedAt\n'
' CheckListItemID\n'
' lane\n'
' CheckListID\n'
' }\n'
' nextToken\n'
' }\n'
' }\n',
'listcommandss': '\n'
' query ListCommandss(\n'
' $filter: ModelCommandsFilterInput\n'
' $limit: Int\n'
' $nextToken: String\n'
' ) {\n'
' listCommandss(filter: $filter, limit: $limit, '
'nextToken: $nextToken) {\n'
' items {\n'
' id\n'
' from\n'
' mode\n'
' command\n'
' output\n'
' state\n'
' returnCode\n'
' investigationId\n'
' createdAt\n'
' updatedAt\n'
' }\n'
' nextToken\n'
' }\n'
' }\n',
'listcomments': '\n'
' query ListComments(\n'
' $filter: ModelCommentFilterInput\n'
' $limit: Int\n'
' $nextToken: String\n'
' ) {\n'
' listComments(filter: $filter, limit: $limit, nextToken: '
'$nextToken) {\n'
' items {\n'
' id\n'
' parent\n'
' createdAt\n'
' updatedAt\n'
' author\n'
' content\n'
' type\n'
' links\n'
' investigationId\n'
' }\n'
' nextToken\n'
' }\n'
' }\n',
'listconferenceconfigs': '\n'
' query ListConferenceConfigs(\n'
' $filter: ModelConferenceConfigFilterInput\n'
' $limit: Int\n'
' $nextToken: String\n'
' ) {\n'
' listConferenceConfigs(\n'
' filter: $filter\n'
' limit: $limit\n'
' nextToken: $nextToken\n'
' ) {\n'
' items {\n'
' id\n'
' url\n'
' userName\n'
' <PASSWORD>'
' author\n'
' createdAt\n'
' updatedAt\n'
' }\n'
' nextToken\n'
' }\n'
' }\n',
'listconnectorforms': '\n'
' query ListConnectorForms(\n'
' $filter: ModelConnectorFormFilterInput\n'
' $limit: Int\n'
' $nextToken: String\n'
' ) {\n'
' listConnectorForms(filter: $filter, limit: $limit, '
'nextToken: $nextToken) {\n'
' items {\n'
' id\n'
' createdAt\n'
' updatedAt\n'
' name\n'
' owner\n'
' connectorID\n'
' configData\n'
' commandData\n'
' nodeData\n'
' scope\n'
' PeopleCanView\n'
' PeopleCanAccess\n'
' GroupsCanView\n'
' GroupsCanAccess\n'
' }\n'
' nextToken\n'
' }\n'
' }\n',
'listconnectorss': '\n'
' query ListConnectorss(\n'
' $filter: ModelConnectorsFilterInput\n'
' $limit: Int\n'
' $nextToken: String\n'
' ) {\n'
' listConnectorss(filter: $filter, limit: $limit, '
'nextToken: $nextToken) {\n'
' items {\n'
' id\n'
' createdAt\n'
' updatedAt\n'
' category\n'
' name\n'
' owner\n'
' description\n'
' detaileddescription\n'
' iconPath\n'
' version\n'
' md5\n'
' source\n'
' configuration\n'
' commands\n'
' commandsType\n'
' script\n'
' scriptType\n'
' scriptPath\n'
' scope\n'
' PeopleCanView\n'
' PeopleCanAccess\n'
' GroupsCanView\n'
' GroupsCanAccess\n'
' }\n'
' nextToken\n'
' }\n'
' }\n',
'listdashboardconfiglabellinks': '\n'
' query ListDashboardConfigLabelLinks(\n'
' $filter: '
'ModelDashboardConfigLabelLinkFilterInput\n'
' $limit: Int\n'
' $nextToken: String\n'
' ) {\n'
' listDashboardConfigLabelLinks(\n'
' filter: $filter\n'
' limit: $limit\n'
' nextToken: $nextToken\n'
' ) {\n'
' items {\n'
' id\n'
' dashboardConfigID\n'
' labelID\n'
' }\n'
' nextToken\n'
' }\n'
' }\n',
'listdashboardconfigs': '\n'
' query ListDashboardConfigs(\n'
' $filter: ModelDashboardConfigFilterInput\n'
' $limit: Int\n'
' $nextToken: String\n'
' ) {\n'
' listDashboardConfigs(\n'
' filter: $filter\n'
' limit: $limit\n'
' nextToken: $nextToken\n'
' ) {\n'
' items {\n'
' id\n'
' url\n'
' author\n'
' createdAt\n'
' updatedAt\n'
' }\n'
' nextToken\n'
' }\n'
' }\n',
'listecubesandboxexecutions': '\n'
' query ListEcubeSandboxExecutions(\n'
' $filter: '
'ModelEcubeSandboxExecutionFilterInput\n'
' $limit: Int\n'
' $nextToken: String\n'
' ) {\n'
' listEcubeSandboxExecutions(\n'
' filter: $filter\n'
' limit: $limit\n'
' nextToken: $nextToken\n'
' ) {\n'
' items {\n'
' id\n'
' owner\n'
' output\n'
' outputType\n'
' returnCode\n'
' status\n'
' createdAt\n'
' updatedAt\n'
' E3One\n'
' | |
LST must match')
if n_pointings < n_lst:
pointing_center = NP.repeat(pointing_center, n_lst, axis=0)
n_snaps = lst.size
if pointing_coords == 'dircos':
pointings_altaz = GEOM.dircos2altaz(pointing_center, units='degrees')
elif pointing_coords == 'hadec':
pointings_altaz = GEOM.hadec2altaz(pointing_center, latitude, units='degrees')
elif pointing_coords == 'radec':
pointings_altaz = GEOM.hadec2altaz(NP.hstack(((lst-pointing_center[:,0]).reshape(-1,1), pointing_center[:,1].reshape(-1,1))), latitude, units='degrees')
else:
pointings_altaz = NP.copy(pointing_center)
if skymodel.coords == 'radec':
lst_temp = NP.hstack((lst.reshape(-1,1),NP.zeros(n_snaps).reshape(-1,1))) # Prepare fake LST for numpy broadcasting
lst_temp = lst_temp.T
lst_temp = lst_temp[NP.newaxis,:,:]
sky_hadec = lst_temp - skymodel.location[:,:,NP.newaxis] # Reverses sign of declination
sky_hadec[:,1,:] *= -1 # Correct for the reversal of sign in the declination
sky_hadec = NP.concatenate(NP.split(sky_hadec, n_snaps, axis=2), axis=0)
sky_hadec = NP.squeeze(sky_hadec, axis=2)
sky_altaz = GEOM.hadec2altaz(sky_hadec, latitude, units='degrees')
elif skymodel.coords == 'hadec':
sky_altaz = GEOM.hadec2altaz(skymodel.location, latitude, units='degrees')
elif skymodel.coords == 'dircos':
sky_altaz = GEOM.dircos2altaz(skymodel.location, units='degrees')
else:
sky_altaz = NP.copy(skymodel.location)
sky_altaz = NP.split(sky_altaz, range(0,sky_altaz.shape[0],n_src)[1:], axis=0) # Split sky_altaz into a list of arrays
retval = []
progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(), PGB.ETA()], maxval=len(sky_altaz)).start()
for i in xrange(len(sky_altaz)):
pinfo = {}
pinfo['pointing_center'] = pointings_altaz[i,:]
pinfo['pointing_coords'] = 'altaz'
# if 'element_locs' in telescope_info:
# pinfo['element_locs'] = telescope_info['element_locs']
upper_hemisphere_ind = sky_altaz[i][:,0] >= 0.0
upper_skymodel = skymodel.subset(indices=NP.where(upper_hemisphere_ind)[0])
pb = PB.primary_beam_generator(sky_altaz[i][upper_hemisphere_ind,:], skymodel.frequency, telescope_info, freq_scale=freq_scale, skyunits='altaz', pointing_info=pinfo)
spectrum = upper_skymodel.generate_spectrum(interp_method='pchip')
retval += [NP.sum(pb*spectrum, axis=0) / NP.sum(pb, axis=0)]
progress.update(i+1)
progress.finish()
return NP.asarray(retval)
#################################################################################
class GainInfo(object):
"""
----------------------------------------------------------------------------
Class to manage instrument gains
Attributes:
gaintable [None or dictionary] If set to None, all antenna- and
baseline-based gains will be set to unity. If returned as
dictionary, it contains the loaded gains. It contains the
following keys and values:
'antenna-based' [None or dictionary] Contains antenna-
based instrument gain information. If
set to None, all antenna-based gains are
set to unity. If returned as dictionary,
it has the following keys and values:
'ordering' [list or numpy array] Three
element list of strings
indicating the ordering of
axes - 'time', 'label',
and 'frequency' as specified
in input axes_order
'gains' [scalar or numpy array]
Complex antenna-based
instrument gains. Must be
of shape (nant, nchan, nts)
If there is no variations in
gains along an axis, then
the corresponding nax may be
set to 1 and the gains will
be replicated along that
axis using numpy array
broadcasting. For example,
shapes (nant,1,1), (1,1,1),
(1,nchan,nts) are
acceptable. If specified as
a scalar, it will be
replicated along all three
axes, namely, 'label',
'frequency' and 'time'.
'label' [None or list or numpy
array] List or antenna
labels that correspond to
nant along the 'label' axis.
If nant=1, this may be set
to None, else it will be
specified and will match the
nant.
'frequency' [None or list or numpy array]
Frequency channels that
correspond to the nax along
the 'frequency' axis. If the
nchan=1 along the 'frequency'
axis, this may be set to None,
else it must be specified and
must match the nchan
'time' [None or list or numpy array]
Observation times that
correspond to the nax along
the 'time' axis. If the
ntimes=1 along the 'time'
axis, this may be set to None,
else it must be specified and
must match the ntimes. It must
be a float and can be in
seconds, hours, days, etc.
'baseline-based' [None or dictionary] Contains baseline-
based instrument gain information. If
set to None, all baseline-based gains
are set to unity. If returned as
dictionary, it has the following keys
and values:
'ordering' [list or numpy array] Three
element list of strings
indicating the ordering of
axes - 'time', 'label',
and 'frequency' as
specified in input
axes_order
'gains' [scalar or numpy array]
Complex baseline-based
instrument gains. Must be
of shape (nbl, nchan, nts)
If there is no variations in
gains along an axis, then
the corresponding nax may be
set to 1 and the gains will
be replicated along that
axis using numpy array
broadcasting. For example,
shapes (nant,1,1), (1,1,1),
(1,nchan,nts) are
acceptable. If specified as
a scalar, it will be
replicated along all three
axes, namely, 'label',
'frequency' and 'time'.
'label' [None or list or numpy
array] List or baseline
labels that correspond to
nbl along the 'label' axis.
If nbl=1 along the 'label'
axis this may be set to
None, else it will be
specified and will match nbl
'frequency' [None or list or numpy array]
Frequency channels that
correspond to the nax along
the 'frequency' axis. If the
nchan=1 along the 'frequency'
axis, this may be set to None,
else it must be specified and
must match the nchan
'time' [None or list or numpy array]
Observation times that
correspond to the nax along
the 'time' axis. If the
ntimes=1 along the 'time'
axis, this may be set to None,
else it must be specified and
must match the ntimes. It must
be a float and can be in
seconds, hours, days, etc.
interpfuncs [dictionary] Determined in member function interpolator().
Contains interpolation information under two keys, namely,
'antenna-based' and 'baseline-based'. Under each of these keys
is another dictionary with the following keys and values:
'dims' [numpy array of strings] Contains the axes labels
of the interpolated axes for antenna or baseline
labels. It could contain a single element ['time'],
of ['frequency'] indicating 1D splines along that
axis or contain two elements 'time' and 'frequency'
indicating 2D splines. 1D splines will have been
obtained with scipy.interpolate.interp1d while
2D splines obtained with scipy.interpolate.interp2d
'interp' [numpy recArray] Holds the interpolation functions
(instances of scipy.interpolate.interp1d or
scipy.interpolate.interp2d depending on the value
under 'dims' key) for each antenna or baseline
label. It is of size nbl. Each entry in this
numpy recArray has two fields, 'real' for
interpolation of real part and 'imag' for the
imaginary part. If it is a one element recarray,
then it applies to all antennas and baselines
Member function interpolate_gains() uses this attribute to
return interpolated gains
splinefuncs [dictionary] Determined in member function splinator().
Contains spline information under two keys, namely,
'antenna-based' and 'baseline-based'. Under each of these keys
is another dictionary with the following keys and values:
'dims' [numpy array of strings] Contains the axes labels
of the interpolated axes for antenna or baseline
labels. It could contain a single element ['time'],
of ['frequency'] indicating 1D splines along that
axis or contain two elements 'time' and 'frequency'
indicating 2D splines. 1D splines will have been
obtained with scipy.interpolate.UnivariateSpline
while 2D splines obtained with
scipy.interpolate.RectBivariateSpline
'interp' [numpy recArray] Holds the spline functions
(instances of scipy.interpolate.UnivariateSpline or
scipy.interpolate.RectBivariateSpline depending on
the value under 'dims' key) for each antenna or
baseline label. It is of size nbl. Each entry in
this numpy recArray has two fields, 'real' for
interpolation of real part and 'imag' for the
imaginary part. If it is a one element recarray,
then it applies to all antennas and baselines.
Member function spline_gains() uses this attribute to return
spline-interpolated gains
Member functions:
__init__() Initialize an instance of class GainInfo from a file
read_gaintable()
Read gain table from file in HDF5 format and return and/or
store as attribute
eval_gains()
Extract complex instrument gains for given baselines from the
gain table
interpolator()
Sets up interpolation functions and stores them in the
attribute interpfuncs. Better alternative is to use splinator()
splinator() Sets up spline functions and stores them in the attribute
splinefuncs. Better alternative to interpolator()
interpolate_gains()
Interpolate at the specified baselines for the given
frequencies and times using attribute interpfuncs. Better
alternative is to use spline_gains()
spline_gains()
Evaluate spline at the specified baselines for the given
frequencies and times using attribute splinefuncs. Better
alternative to interpolate_gains()
nearest_gains()
Extract complex instrument gains for given baselines from the
gain table determined by nearest neighbor logic
write_gaintable()
Write gain table with specified axes ordering to external file
in HDF5 format
-----------------------------------------------------------------------------
"""
def __init__(self, init_file=None, axes_order=None):
"""
------------------------------------------------------------------------
Initialize an instance of class GainInfo from a file
Attributes initialized are:
gaintable, interpfuncs, splinefuncs
Read docstring of class GainInfo for details on these attributes
Keyword Inputs:
gainsfile | |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# >>
# Copyright 2018 Vivint, inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# vivint-selenium-docker, 20017
# <<
import logging
import time
from abc import abstractmethod
from collections import Mapping
from functools import partial, wraps
import docker
import gevent
from docker.errors import APIError, DockerException, NotFound
from docker.models.containers import Container
from six import string_types
from selenium_docker.errors import DockerError, SeleniumDockerException
from selenium_docker.utils import gen_uuid
def check_engine(fn):
    """ Pre-check our engine connection by sending a ping before our
        intended operation.

        Args:
            fn (Callable): wrapped function.

        Returns:
            Callable

        Example::

            @check_engine
            def do_something_with_docker(self):
                # will raise APIError before getting here
                # if there's a problem with the Docker Engine connection.
                return True
    """
    @wraps(fn)
    def wrapper(self, *args, **kwargs):
        self.logger.debug('pinging docker engine')
        try:
            self.docker.ping()
        except SeleniumDockerException as err:  # pragma: no cover
            # surface the failed ping with a full traceback, then re-raise
            self.logger.exception(err, exc_info=True)
            raise err
        self.logger.debug('pass')
        return fn(self, *args, **kwargs)
    return wrapper
class ContainerInterface(object):
    """ Required functionality for implementing a custom object that has an
        underlying container.
    """

    # spec dict describing the underlying container; subclasses override it
    CONTAINER = None

    def __str__(self):
        image = self.CONTAINER.get('image', 'None')
        return '<{}(image={})>'.format(self.__class__.__name__, image)

    @abstractmethod
    def _make_container(self):
        raise NotImplementedError

    @abstractmethod
    def close_container(self):
        raise NotImplementedError

    @abstractmethod
    def quit(self):
        raise NotImplementedError
class ContainerFactory(object):
""" Used as an interface for interacting with Container instances.
Example::
from selenium_docker.base import ContainerFactory
factory = ContainerFactory.get_default_factory('reusable')
factory.stop_all_containers()
Will attempt to connect to the local Docker Engine, including the word
``reusable`` as part of each new container's name. Calling
``factory.stop_all_containers()`` will stop and remove containers assocated
with that namespace.
Reusing the same ``namespace`` value will allow the factory to inherit
the correct containers from Docker when the program is reset.
Args:
engine (:obj:`docker.client.DockerClient`): connection to the
Docker Engine the application will interact with. If ``engine`` is
``None`` then :func:`docker.client.from_env` will be called to
attempt connecting locally.
namespace (str): common name included in all the new docker containers
to allow tracking their status and cleaning up reliably.
make_default (bool): when ``True`` this instance will become the
default, used as a singleton, when requested via
:func:`~ContainerFactory.get_default_factory`.
logger (:obj:`logging.Logger`): logging module Logger instance.
"""
DEFAULT = None
""":obj:`.ContainerFactory`: singleton instance to a container factory
that can be used to spawn new containers accross a single connected
Docker engine.
This is the instance returned by
:func:`~ContainerFactory.get_default_factory`.
"""
__slots__ = ('_containers', '_engine', '_ns', 'logger')
def __init__(self, engine, namespace, make_default=True, logger=None):
self._containers = {}
self._engine = engine or docker.from_env()
self._ns = namespace or gen_uuid(10)
self.logger = logger or logging.getLogger(
'%s.ContainerFactory.%s' % (__name__, self._ns))
if make_default and ContainerFactory.DEFAULT is None:
ContainerFactory.DEFAULT = self
if namespace:
# we supplied the namespace, we can bootstrap our
# tracked containers back from the environment
self._containers = self.get_namespace_containers(namespace)
def __repr__(self):
return '<ContainerFactory(docker=%s,ns=%s,count=%d)>' % (
self._engine.api.base_url, self._ns, len(self._containers.keys()))
@property
def containers(self):
"""dict:
:obj:`~docker.models.containers.Container` instances
mapped by name.
"""
return self._containers
@property
def docker(self):
""":obj:`docker.client.DockerClient`:
reference to the connected Docker engine.
"""
return self._engine
@property
def namespace(self):
"""str: ready-only property for this instance's namespace,
used for generating names.
"""
return self._ns
def __bootstrap(self, container, **kwargs):
""" Adds additional attributes and functions to Container instance.
Args:
container (Container): instance of
:obj:`~docker.models.containers.Container` that is being
fixed up with expected values.
kwargs (dict): arbitrary attribute names and their values to
attach to the ``container`` instance.
Returns:
:obj:`~docker.models.containers.Container`:
the exact instance passed in.
"""
self.logger.debug('bootstrapping container instance to factory')
c = container
for k, v in kwargs.items(): # pragma: no cover
setattr(c, k, v)
c.started = time.time()
c.logger = logging.getLogger('%s.%s' % (__name__, kwargs.get('name')))
c.ns = self._ns
return c
def as_json(self):
""" JSON representation of our factory metadata.
Returns:
dict:
that is a :py:func:`json.dumps` compatible dictionary instance.
"""
return {
'_ref': str(self),
'count': len(self.containers)
}
def gen_name(self, key=None):
""" Generate the name of a new container we want to run.
This method is used to keep names consistent as well as to ensure
the name/identity of the ``ContainerFactory`` is included. When a
``ContainerFactory`` is loaded on a machine with containers already
running with its name it'll inherit those instances to re-manage
between application runs.
Args:
key (str): the identifiable portion of a container name. If one
isn't supplied (the default) then one is randomly generated.
Returns:
str:
in the format of ``selenium-<FACTORY_NAMESPACE>-<KEY>``.
"""
return 'selenium-%s-%s' % (self._ns, key or gen_uuid(6))
@classmethod
def get_default_factory(cls, namespace=None, logger=None):
""" Creates a default connection to the local Docker engine.
This ``classmethod`` acts as a singleton. If one hasn't been made it
will attempt to create it and attach the instance to the class
definition. Because of this the method is the preferable way to obtain
the default connection so it doesn't get overwritten or modified by
accident.
Note:
By default this method will attempt to connect to the **local**
Docker engine only. Do not use this when attempting to use
a remote engine on a different machine.
Args:
namespace (str): use this namespace if we're creating a new
default factory instance.
logger (:obj:`logging.Logger`): instance of logger to attach
to this factory instance.
Returns:
:obj:`~.ContainerFactory`: instance to interact with Docker engine.
"""
if cls.DEFAULT is None:
cls(None, namespace, make_default=True, logger=logger)
return cls.DEFAULT
@check_engine
def get_namespace_containers(self, namespace=None):
""" Glean the running containers from the environment that are
using our factory's namespace.
Args:
namespace (str): word identifying ContainerFactory containers
represented in the Docker Engine.
Returns:
dict:
:obj:`~docker.models.containers.Container` instances
mapped by name.
"""
if namespace is None:
namespace = self.namespace
ret = {}
for c in self.docker.containers.list():
if namespace in c.name:
ret[c.name] = c
return ret
@check_engine
def load_image(self, image, tag=None, insecure_registry=False,
background=False):
""" Issue a ``docker pull`` command before attempting to start/run
containers. This could potentially increase startup time, as well
as ensure the containers are up-to-date.
Args:
image (str): name of the container we're downloading.
tag (str): tag/version of the container.
insecure_registry (bool): allow downloading image templates from
insecure Docker registries.
background (bool): spawn the download in a background thread.
Raises:
:exc:`docker.errors.DockerException`:
if anything goes wrong during the image template download.
Returns:
:obj:`docker.models.images.Image`:
the Image controlled by the connected Docker engine.
Containers are spawned based off this template.
"""
if tag is None:
tag = ''
if isinstance(image, Mapping):
image = image.get('image', None)
if not isinstance(image, string_types):
raise ValueError('cannot determine image from %s' % type(image))
try:
self.logger.debug('checking locally for image')
img = self.docker.images.get(image)
except NotFound as e:
self.logger.debug('could not find image locally, %s', image)
else:
return img
self.logger.debug('loading image, %s:%s', image, tag or 'latest')
fn = partial(self.docker.images.pull,
image,
tag=tag,
insecure_registry=insecure_registry,
stream=True)
if background:
gevent.spawn(fn)
else:
return fn()
@check_engine
def scrub_containers(self, *labels):
""" Remove **all** containers that were dynamically created.
Args:
labels (str): labels to include in our search for finding
containers to scrub from the connected Docker engine.
Returns:
int: the number of containers stopped and removed.
"""
def stop_remove(c):
try:
c.stop()
c.remove()
except NotFound:
self.logger.warning('could not find container %s', c.name)
total = 0
self.logger.debug('scrubbing all containers by library')
# attempt to stop all the containers normally
self.stop_all_containers()
labels = ['browser', 'dynamic'] + list(set(labels))
threads = []
found = set()
# now close all dangling containers
for label in labels:
containers = self.docker.containers.list(
filters={'label': label})
count = len(containers)
self.logger.debug(
'found %d dangling containers with label %s',
count, label)
total += count
for c in containers:
if c.name not in found:
found.add(c.name)
threads.append(gevent.spawn(stop_remove, c))
for t in reversed(threads):
t.join()
return total
@check_engine
def start_container(self, spec, **kwargs):
""" Creates and runs a new container defined by ``spec``.
Args:
spec (dict): the specification of our docker container. This
can include things such as the name, labels, image,
restart conditions, etc. The built-in driver containers
already have this defined in their | |
"""
Class Client
package Bitpay
author <NAME>
version 3.3.2203
See bitpay.com/api for more information.
"""
import os
import json
from .config import Config
from .tokens import Tokens
from .models.facade import Facade
from .models.bill.bill import Bill
from .models.Rate.rate import Rate
from .utils.rest_cli import RESTcli
from .models.Rate.rates import Rates
from .models.currency import Currency
from .models.ledger.ledger import Ledger
from .models.wallet.wallet import Wallet
from .models.payout.payout import Payout
from .models.invoice.refund import Refund
from .models.invoice.invoice import Invoice
from .models.payout.payout_batch import PayoutBatch
from .models.ledger.ledger_entry import LedgerEntry
from .models.settlement.settlement import Settlement
from .exceptions.bitpay_exception import BitPayException
from .models.subscription.subscription import Subscription
from .models.payout.payout_recipient import PayoutRecipient
from .models.payout.payout_recipients import PayoutRecipients
from .exceptions.bill_query_exception import BillQueryException
from .exceptions.rate_query_exception import RateQueryException
from .exceptions.bill_update_exception import BillUpdateException
from .exceptions.ledger_query_exception import LedgerQueryException
from .exceptions.wallet_query_exception import WalletQueryException
from .exceptions.refund_query_exception import RefundQueryException
from .exceptions.payout_query_exception import PayoutQueryException
from .exceptions.bill_creation_exception import BillCreationException
from .exceptions.bill_delivery_exception import BillDeliveryException
from .exceptions.refund_update_exception import RefundUpdateException
from .exceptions.invoice_query_exception import InvoiceQueryException
from .exceptions.invoice_update_exception import InvoiceUpdateException
from .exceptions.currency_query_exception import CurrencyQueryException
from .exceptions.refund_creation_exception import RefundCreationException
from .exceptions.payout_creation_exception import PayoutCreationException
from .exceptions.invoice_payment_exception import InvoicePaymentException
from .exceptions.settlement_query_exception import SettlementQueryException
from .exceptions.invoice_creation_exception import InvoiceCreationException
from .exceptions.payoutbatch_query_exception import PayoutBatchQueryException
from .exceptions.subscription_query_exception import SubscriptionQueryException
from .exceptions.subscription_update_exception import SubscriptionUpdateException
from .exceptions.refund_notification_exception import RefundNotificationException
from .exceptions.refund_cancellation_exception import RefundCancellationException
from .exceptions.payout_cancellation_exception import PayoutCancellationException
from .exceptions.payout_notification_exception import PayoutNotificationException
from .exceptions.invoice_cancellation_exception import InvoiceCancellationException
from .exceptions.invoice_notification_exception import InvoiceNotificationException
from .exceptions.payoutbatch_creation_exception import PayoutBatchCreationException
from .exceptions.subscription_creation_exception import SubscriptionCreationException
from .exceptions.payout_recipient_query_exception import PayoutRecipientQueryException
from .exceptions.payout_recipient_update_exception import PayoutRecipientUpdateException
from .exceptions.payoutbatch_cancellation_exception import (
PayoutBatchCancellationException,
)
from .exceptions.payoutbatch_notification_exception import (
PayoutBatchNotificationException,
)
from .exceptions.payout_recipient_creation_exception import (
PayoutRecipientCreationException,
)
from .exceptions.payout_recipient_cancellation_exception import (
PayoutRecipientCancellationException,
)
from .exceptions.payout_recipient_notification_exception import (
PayoutRecipientNotificationException,
)
class Client:
"""
* Class Client
* @package Bitpay
* @author <NAME>
* @version 3.3.2203
* See bitpay.com/api for more information.
"""
__configuration = None
__env = None
__ec_key = None
__token_cache = None
__currencies_info = []
__restcli = None
def __init__(
self, config_file_path, environment=None, private_key=None, tokens=None
):
try:
if config_file_path:
self.build_config_from_file(config_file_path)
self.init_keys()
self.init()
else:
self.__env = environment
self.build_config(private_key, tokens)
self.init_keys()
self.init()
except Exception as exe:
raise BitPayException("failed to initiate client: " + str(exe))
def build_config_from_file(self, config_file_path: str):
try:
self.__configuration = Config()
if os.path.exists(config_file_path):
try:
read_file = open(config_file_path, "r")
json_data = json.loads(read_file.read())
self.__env = json_data["BitPayConfiguration"]["Environment"]
env_config = json_data["BitPayConfiguration"]["EnvConfig"][
self.__env
]
read_file.close()
except Exception as exe:
raise BitPayException(
"Error when reading configuration file", str(exe)
)
self.__configuration.set_environment(self.__env)
self.__configuration.set_envconfig({self.__env: env_config})
else:
raise BitPayException("Configuration file not found")
except Exception as exe:
raise BitPayException("failed to process configuration: " + str(exe))
def build_config(self, private_key_path: str, tokens: Tokens):
"""
:param private_key_path: path to private key
:param tokens: New tokens are provided with each response from the API.
"""
try:
self.__configuration = Config()
if os.path.exists(private_key_path):
read_file = open(private_key_path, "r")
plain_private_key = read_file.read()
self.__ec_key = plain_private_key
read_file.close()
else:
raise BitPayException("Private Key file not found")
env_config = {
"PrivateKeyPath": private_key_path,
"PrivateKey": plain_private_key,
"ApiTokens": tokens,
}
self.__configuration.set_environment(self.__env)
self.__configuration.set_envconfig({self.__env: env_config})
except Exception as exe:
raise BitPayException("failed to process configuration: " + str(exe))
def init_keys(self):
if not self.__ec_key:
try:
private_key_path = self.__configuration.get_envconfig()[self.__env][
"PrivateKeyPath"
]
if os.path.exists(private_key_path):
with open(private_key_path) as f:
self.__ec_key = f.read()
else:
plain_private_key = self.__configuration.get_envconfig()[
self.__env
]["PrivateKey"]
if plain_private_key:
self.__ec_key = plain_private_key
except Exception as exe:
raise BitPayException(
"When trying to load private key. Make sure the "
"configuration details are correct and the private key"
" and tokens are valid: ",
str(exe),
)
def init(self):
try:
proxy = None
if "proxy" in self.__configuration.get_envconfig()[self.__env]:
proxy = self.__configuration.get_envconfig()[self.__env]["proxy"]
self.__restcli = RESTcli(self.__env, self.__ec_key, proxy)
self.load_access_tokens()
self.__currencies_info = self.load_currencies()
except Exception as exe:
raise BitPayException(
"failed to deserialize BitPay server response" " (Token array): ",
str(exe),
)
    def load_currencies(self):
        """
        Return the list of supported currencies.

        NOTE(review): this is currently a stub that always returns an empty
        list, so the ``except`` branch is effectively unreachable.
        """
        try:
            return []
        except BitPayException as exe:
            print(exe)
def load_access_tokens(self):
try:
self.clear_access_token_cache()
self.__token_cache = self.__configuration.get_envconfig()[self.__env][
"ApiTokens"
]
except Exception as exe:
raise BitPayException("When trying to load the tokens: ", str(exe))
    def clear_access_token_cache(self):
        """Reset the token cache to an empty :class:`Tokens` instance."""
        self.__token_cache = Tokens()
def get_access_token(self, key: str):
try:
return self.__token_cache[key]
except Exception as exe:
raise BitPayException("There is no token for the specified key: ", str(exe))
# //////////////////////////////////////////////////////////////////////////////
# //////////////////////////////////////////////////////////////////////////////
# //////////////////////////////////////////////////////////////////////////////
# //////////////////////////////////////////////////////////////////////////////
# //////////////////////////////////////////////////////////////////////////////
# //////////////////////////////////////////////////////////////////////////////
# //////////////////////////////////////////////////////////////////////////////
def create_invoice(
self, invoice: Invoice, facade: str = Facade.Merchant, sign_request: bool = True
) -> Invoice:
"""
Create a BitPay invoice
:param Invoice invoice: An Invoice object with request parameters defined
:param str facade: The facade used to create it
:param str sign_request: Signed request
:return: A BitPay generated Invoice object
:rtype: Invoice
:raises BitPayException
:raises InvoiceCreationException
"""
try:
invoice.set_token(self.get_access_token(facade))
invoice_json = invoice.to_json()
response_json = self.__restcli.post("invoices", invoice_json, sign_request)
except BitPayException as exe:
raise InvoiceCreationException(
"failed to serialize Invoice object : " "%s" % str(exe),
exe.get_api_code(),
)
except Exception as exe:
raise InvoiceCreationException(
"failed to serialize Invoice object : %s" % str(exe)
)
try:
invoice = Invoice(**response_json)
except Exception as exe:
raise InvoiceCreationException(
"failed to deserialize BitPay server response "
"(Invoice) : %s" % str(exe)
)
return invoice
def get_invoice(
self, invoice_id: str, facade: str = Facade.Merchant, sign_request: bool = True
) -> Invoice:
"""
Retrieve a BitPay invoice by invoice id using the specified facade.
The client must have been previously authorized for the specified
facade (the public facade requires no authorization)
:param str invoice_id: The id of the invoice to retrieve
:param str facade: The facade used to create it
:param bool sign_request: Signed request
:return: A BitPay Invoice object
:rtype: Invoice
:raises BitPayException
:raises InvoiceQueryException
"""
try:
params = {"token": self.get_access_token(facade)}
response_json = self.__restcli.get(
"invoices/%s" % invoice_id, params, sign_request
)
except BitPayException as exe:
raise InvoiceQueryException(
"failed to serialize Invoice object : " "%s" % str(exe),
exe.get_api_code(),
)
except Exception as exe:
raise InvoiceQueryException(
"failed to serialize Invoice object :" " %s" % str(exe)
)
try:
invoice = Invoice(**response_json)
except Exception as exe:
raise InvoiceQueryException(
"failed to deserialize BitPay server response"
" (Invoice) : %s" % str(exe)
)
return invoice
def get_invoices(
self,
date_start: str,
date_end: str,
status: str = None,
order_id: str = None,
limit: int = None,
offset: int = None,
) -> [Invoice]:
"""
Retrieve a collection of BitPay invoices.
:param str date_start: The first date for the query filter.
:param str date_end: The last date for the query filter.
:param str status: The invoice status you want to query on.
:param str order_id: The optional order id specified at time of invoice creation.
:param int limit: Maximum results that the query will return (useful for paging results).
:param int offset: Number of results to offset
(ex. skip 10 will give you results starting with the 11th)
:return: A list of BitPay Invoice objects.
:rtype: [Invoice]
:raises BitPayException
:raises InvoiceQueryException
"""
try:
params = {
"token": self.get_access_token(Facade.Merchant),
"dateStart": date_start,
"date_end": date_end,
}
if status:
params["status"] = status
if order_id:
params["order_id"] = order_id
if limit:
params["limit"] = limit
if offset:
params["offset"] = offset
response_json = self.__restcli.get("invoices/", parameters=params)
except BitPayException as exe:
raise InvoiceQueryException(
"failed to serialize Invoice object : %s" % str(exe), exe.get_api_code()
)
except Exception as exe:
raise InvoiceQueryException(
"failed to serialize Invoice object : %s" % str(exe)
)
try:
invoices = []
for invoice_data in response_json:
invoices.append(Invoice(**invoice_data))
except Exception as exe:
raise InvoiceQueryException(
"failed to deserialize BitPay server "
"response (Invoice) : %s" % str(exe)
)
return invoices
def update_invoice(self, invoice_id: str, buyer_email: str) -> Invoice:
"""
Update a BitPay invoice with communication method.
:param str invoice_id: The id of the invoice to updated.
:param str buyer_email: The buyer's email address.
:return: A BitPay generated Invoice object.
:rtype: Invoice
:raises BitPayException
:raises InvoiceUpdateException
"""
try:
params = {
"token": self.get_access_token(Facade.Merchant),
"buyer_email": buyer_email,
}
response_json = self.__restcli.update("invoices/%s" % invoice_id, params)
except BitPayException as exe:
raise InvoiceUpdateException(
"failed to serialize Invoice object :" " %s" % str(exe),
exe.get_api_code(),
)
except Exception as exe:
raise InvoiceUpdateException(
"failed to serialize Invoice object : %s" % str(exe)
)
try:
invoice = Invoice(**response_json)
except Exception as exe:
raise InvoiceUpdateException(
"failed to deserialize BitPay server response"
" (Invoice) : %s" % str(exe)
)
return invoice
def cancel_invoice(self, invoice_id: str, force_cancel: bool = False) -> Invoice:
"""
Delete a previously created BitPay invoice.
:param str invoice_id: The Id of the BitPay invoice to be canceled.
:param bool force_cancel: Query param that will cancel the invoice even if
no contact information is present
:return: A BitPay generated Invoice object.
:rtype: Invoice
:raises BitPayException
:raises InvoiceCancellationException
"""
try:
params = {
"token": self.get_access_token(Facade.Merchant),
"force_cancel": force_cancel,
}
response_json = self.__restcli.delete("invoices/%s" % invoice_id, params)
except BitPayException as exe:
raise InvoiceCancellationException(
"failed to serialize Invoice object : %s" | |
Transversal Mercator projection - returns list of (x,y) tuples (unit: m)
"""
return [ p.transMercator(centerlon) for p in self.points ]
def simplify (self, delta=1.0):
"""
Reduce number of track points while preserving the shape of the track segment as much as possible.
The parameter delta gives the minimum distance [m] that a track point should have
from the straight line connecting its predecessor and successor.
"""
n = len(self.points)
if n <= 2:
# there's nothing to simplify
return
idx = [0] + self._simplify (0, n-1, delta) + [n-1]
self.points = [ self.points[i] for i in idx ]
def _simplify (self, start, end, delta):
# Reduce number of intermediate points between points[start] and points[end]
if end-start <= 1:
return [] # there are no intermediate points
# calculate the point with maximum distance
# from the straight line between points[start] and points[end]
max_dist = 0.0
max_index = None
for k in range(start+1, end):
d = self.points[k].distance (self.points[start], self.points[end])
if d > max_dist:
max_dist = d
max_index = k
if max_dist < delta:
return [] # discard all intermediate points
# recursion
return (
self._simplify(start, max_index, delta)
+ [max_index]
+ self._simplify(max_index, end, delta))
def toKML (self, doc):
"""
convert to KML Element <LineString>
"""
res = doc.createElement ("LineString")
coord = doc.createElement ("coordinates")
coordStr = " ".join([p.toKML(doc) for p in self.points])
coord.appendChild (doc.createTextNode (coordStr))
res.appendChild (coord)
return res
# ----------------------------------------
# Representation of a GPX track segment
# ----------------------------------------
class TrackSegment(LineString):
    """
    Representation of a GPX track segment

    Constructor:
        TrackSegment(points=[])
    """

    def __init__ (self, points=None):
        if points is None:
            self.points = []
        else:
            # points must be an iterable whose items can be cast to TrackPoint
            self.points = [ TrackPoint.cast(p) for p in points ]

    @staticmethod
    def cast (obj):
        """
        try to cast an object to a TrackSegment
        """
        if isinstance(obj, TrackSegment):
            return obj
        elif hasattr(obj, "__iter__"):
            # the object is iterable - try to cast its items to TrackPoints
            return TrackSegment(points=obj)
        else:
            raise TypeError

    def append (self, point):
        """append a single point (cast to TrackPoint)"""
        self.points.append(TrackPoint.cast(point))

    def extend (self, linestring):
        """append all points of another line string (cast to TrackPoint)"""
        self.points.extend([TrackPoint.cast(p) for p in linestring])

    def toGPX (self, doc):
        """
        convert to GPX Element <trkseg>
        """
        res = doc.createElement("trkseg")
        for p in self.points:
            res.appendChild (p.toGPX (doc))
        return res

    @staticmethod
    def fromGPX (trkseg):
        """
        parse GPX Element <trkseg>
        """
        res = TrackSegment()
        for trkpt in trkseg.getElementsByTagName("trkpt"):
            # BUGFIX: was res.appendPoint(...), a method this class does not
            # define (Route.fromGPX uses append()); use append()
            res.append (TrackPoint.fromGPX (trkpt))
        return res

    @staticmethod
    def fromKML (linestring):
        """
        parse KML Element <LineString>
        """
        trkseg = TrackSegment()
        coords = linestring.getElementsByTagName("coordinates")[0].childNodes[0].data.strip()
        for c in coords.split ():
            # KML coordinate order is lon,lat[,alt]
            lon, lat, ele = c.split (",")
            lon = float(lon)
            lat = float(lat)
            # KML uses altitude "0" to mean "no elevation"
            ele = None if ele == "0" else float(ele)
            # BUGFIX: was trkseg.appendPoint(...), which does not exist
            # NOTE(review): Point.__init__ takes (lat, lon, ...); verify that
            # TrackPoint really accepts the (lon, lat, ele) order used here
            trkseg.append (TrackPoint (lon, lat, ele))
        return trkseg
# ----------------------------------------
class Route(LineString):
    """
    Representation of a GPX route

    Constructor:
        Route(name=None, description=None, points=[], **kwargs)
        keyword arguments: comment, source, number, type
    """

    _attributes = ["comment", "source", "number", "type"]
    """
    list of additional attributes
    """
    # TODO: missing attributes: link, extensions

    def __init__ (self, name=None, description=None, points=None, **kwargs):
        self.name = name
        self.description = description
        if points is None:
            self.points = []
        else:
            # points must be an iterable whose items can be cast to RoutePoint
            self.points = [ RoutePoint.cast(p) for p in points ]
        # process additional arguments; missing ones default to None
        for keyword in self._attributes:
            setattr(self, keyword, kwargs.get(keyword))

    @classmethod
    def cast (cls, obj):
        """
        try to cast an object to a Route
        """
        if isinstance(obj, cls):
            return obj
        elif hasattr(obj, "__iter__"):
            # the object is iterable - try to cast its items to RoutePoints.
            # Use cls so subclasses cast to themselves, matching the
            # isinstance check above (was hard-coded to Route).
            res = cls(points=obj)
            # copy attributes
            for attr in ["name", "description"] + cls._attributes:
                if hasattr(obj, attr):
                    setattr(res, attr, getattr(obj, attr))
            return res
        else:
            raise TypeError

    def append (self, point):
        """append a single point (cast to RoutePoint)"""
        self.points.append(RoutePoint.cast(point))

    def extend (self, linestring):
        """append all points of another line string (cast to RoutePoint)"""
        self.points.extend([RoutePoint.cast(p) for p in linestring])

    def _textElement (self, doc, tag, text):
        # helper: build <tag>text</tag>
        e = doc.createElement(tag)
        e.appendChild (doc.createTextNode(text))
        return e

    def toGPX (self, doc):
        """
        convert to GPX Element <rte>
        """
        res = doc.createElement("rte")
        if self.name is not None:
            res.appendChild (self._textElement (doc, "name", self.name))
        if self.comment is not None:
            res.appendChild (self._textElement (doc, "cmt", self.comment))
        if self.description is not None:
            res.appendChild (self._textElement (doc, "desc", self.description))
        if self.source is not None:
            res.appendChild (self._textElement (doc, "src", self.source))
        if self.number is not None:
            res.appendChild (self._textElement (doc, "number", repr(self.number)))
        if self.type is not None:
            res.appendChild (self._textElement (doc, "type", self.type))
        for p in self.points:
            res.appendChild (p.toGPX (doc))
        return res

    @staticmethod
    def fromGPX (rte):
        """
        parse GPX Element <rte>
        """
        def text (tag):
            # text of the last matching child element, or None
            value = None
            for e in rte.getElementsByTagName(tag):
                value = e.childNodes[0].data.strip()
            return value

        name = text("name")
        comment = text("cmt")
        description = text("desc")
        source = text("src")
        number = None
        for e in rte.getElementsByTagName("number"):
            number = int(e.childNodes[0].data)
        # BUGFIX: <type> is free-form text in GPX; it was parsed with int(),
        # which raises ValueError for typical values and is inconsistent with
        # toGPX() writing self.type as a plain string
        rtetype = text("type")
        res = Route(name, description, points=[],
                    comment=comment, source=source, number=number, type=rtetype)
        for rtept in rte.getElementsByTagName("rtept"):
            res.append(RoutePoint.fromGPX (rtept))
        return res

    def toKML (self, doc):
        """
        convert to KML Element <Placemark>
        """
        res = doc.createElement ("Placemark")
        if self.name is not None:
            res.appendChild (self._textElement (doc, "name", self.name))
        if self.description is not None:
            res.appendChild (self._textElement (doc, "description", self.description))
        linestring = doc.createElement ("LineString")
        coord = doc.createElement ("coordinates")
        coordStr = " ".join([p.toKML(doc) for p in self.points])
        coord.appendChild (doc.createTextNode (coordStr))
        linestring.appendChild (coord)
        res.appendChild (linestring)
        return res
# ----------------------------------------
# Common base class for track points, route points and waypoints
# ----------------------------------------
class Point(LatLon):
    """
    Common base class for track points (class TrackPoint), route points (class RoutePoint)
    and waypoints (class Waypoint)

    Constructor: Point(lat, lon, ele=None, t=None, name=None)
    """

    def __init__ (self, lat, lon, ele=None, t=None, name=None):
        LatLon.__init__(self, lat, lon)
        # optional attributes; None means "not present"
        self.ele = ele    # elevation [m]
        self.t = t        # timestamp (datetime)
        self.name = name  # point name

    def __str__ (self):
        s = [ self.__class__.__name__, '(', str(self._lat), ',', str(self._lon) ]
        if self.ele is not None:
            s += [ ',ele=', str(self.ele) ]
        if self.t is not None:
            s += [ ',t=', str(self.t) ]
        if self.name is not None:
            # BUGFIX: was unicode(self.name) - a NameError on Python 3;
            # joining the value directly is equivalent on Python 2 because
            # a unicode item promotes the joined result to unicode
            s += [ ',name=', self.name ]
        s.append(')')
        return ''.join(s)

    def __repr__ (self):
        s = [ self.__class__.__name__, '(', repr(self._lat), ',', repr(self._lon),
              ',', repr(self.ele), ',', repr(self.t), ',', repr(self.name), ')' ]
        return ''.join(s)

    def __eq__ (self, p):
        # equal only when the concrete classes match as well
        return (
            self.__class__ == p.__class__
            and self._lat == p._lat and self._lon == p._lon
            and self.ele == p.ele and self.t == p.t
            and self.name == p.name)

    def __ne__ (self, p):
        return not (self == p)

    def __hash__ (self):
        return hash((self.lat, self.lon, self.ele, self.t, self.name))

    @classmethod
    def cast (cls, obj):
        """
        try to cast an object to class cls (which is a subclass of Point)
        """
        if isinstance (obj, cls):
            return obj
        elif isinstance (obj, LatLon):
            # promote a plain LatLon, copying optional attributes if present
            p = cls(obj.lat, obj.lon)
            if hasattr(obj, "ele"):
                p.ele = obj.ele
            if hasattr(obj, "t"):
                p.t = obj.t
            if hasattr(obj, "name"):
                p.name = obj.name
            return p
        else:
            raise TypeError

    def _toGPX (self, doc, elementName):
        """
        convert to GPX Element (called by the toGPX() methods of subclasses)
        """
        res = doc.createElement(elementName)
        res.setAttribute ("lat", ("%.6f" % self.lat))
        res.setAttribute ("lon", ("%.6f" % self.lon))
        if self.name is not None:
            e = doc.createElement("name")
            e.appendChild (doc.createTextNode(self.name))
            res.appendChild (e)
        if self.ele is not None:
            e = doc.createElement("ele")
            e.appendChild (doc.createTextNode(str(self.ele)))
            res.appendChild (e)
        if self.t is not None:
            e = doc.createElement("time")
            t = self.t.strftime ("%Y-%m-%dT%H:%M:%SZ")
            e.appendChild (doc.createTextNode(t))
            res.appendChild (e)
        return res

    @classmethod
    def fromGPX (cls, element):
        """
        parse GPX Element <wpt>, <trkpt> or <rtept>
        """
        lat = float(element.getAttribute("lat"))
        lon = float(element.getAttribute("lon"))
        name = None
        for e in element.getElementsByTagName("name"):
            name = e.childNodes[0].data.strip()
        ele = None
        for e in element.getElementsByTagName("ele"):
            ele = float(e.childNodes[0].data.strip())
        t = None
        for e in element.getElementsByTagName("time"):
            t = dateutil.parser.parse (e.childNodes[0].data.strip())
        return cls (lat, lon, ele, t, name)
# ----------------------------------------
# Representation of a GPX track point
# ----------------------------------------
class TrackPoint(Point):
    """
    Representation of a GPX track point

    Constructor: TrackPoint(lat, lon, ele=None, t=None, name=None)
    """

    def toGPX (self, doc):
        """
        convert to GPX Element <trkpt>
        """
        return self._toGPX (doc, "trkpt")

    def toKML (self, doc):
        """
        convert to KML-formatted coordinate string (lon,lat,ele order;
        elevation defaults to 0 when unset)
        """
        elevation = self.ele
        if elevation is None:
            return "%f,%f,0" % (self.lon, self.lat)
        return "%f,%f,%.3f" % (self.lon, self.lat, elevation)
# ----------------------------------------
# Representation of a GPX route point
# ----------------------------------------
class RoutePoint(Point):
"""
Representation of a GPX | |
if self.grid_error:
self.logger.warning(
f'The following variants have errored grid points:'
f'\n{self.grid_error}')
# close the file
self.logger.info(
f'\n# Successfully created database: {self.hdf5}\n')
def aug_data(self, augmentation, keep_existing_aug=True, random_seed=None):
"""Augment exiting original PDB data and features.
Args:
augmentation(int): Times of augmentation
keep_existing_aug (bool, optional): Keep existing augmentated data.
If False, existing aug will be removed. Defaults to True.
Examples:
>>> database = DataGenerator(h5='database.h5')
>>> database.aug_data(augmentation=3, append=True)
>>> grid_info = {
>>> 'number_of_points': [20,20,20],
>>> 'resolution': [1.,1.,1.],
>>> 'atomic_densities': {'C':1.7, 'N':1.55, 'O':1.52, 'S':1.8},
>>> }
>>> database.map_features(grid_info)
"""
# check if file exists
if not os.path.isfile(self.hdf5):
raise FileNotFoundError(
'File %s does not exists' % self.hdf5)
# get the folder names
f5 = h5py.File(self.hdf5, 'a')
fnames = f5.keys()
# get the non rotated ones
fnames_original = list(
filter(lambda x: not re.search(r'_r\d+$', x), fnames))
# get the rotated ones
fnames_augmented = list(
filter(lambda x: re.search(r'_r\d+$', x), fnames))
aug_id_start = 0
if keep_existing_aug:
exiting_augs = list(
filter(lambda x: re.search(fnames_original[0] + r'_r\d+$', x), fnames_augmented))
aug_id_start += len(exiting_augs)
else:
for i in fnames_augmented:
del f5[i]
self.logger.info(
f'{"":s}\n# Start augmenting data'
f' with {augmentation} times...')
# GET ALL THE NAMES
for variant_name in fnames_original:
variant_aug_name_list = [
variant_name + '_r%03d' % (idir + 1) for idir in
range(aug_id_start, aug_id_start + augmentation)]
variant = hdf5data.load_variant(f5[variant_name])
# loop over the complexes
for variant_aug_name in variant_aug_name_list:
# create a subgroup for the variant
variant_group = f5.require_group(variant_aug_name)
variant_group.attrs['type'] = 'variant'
hdf5data.store_variant(variant_group, variant)
# copy the ref into it
if 'native' in f5[variant_name]:
f5.copy(variant_name + '/native', variant_group)
# get the rotation axis and angle
if self.align is None:
rotation_axis, rotation_angle = pdb2sql.transform.get_rot_axis_angle(random_seed)
else:
rotation_axis, rotation_angle = self._get_aligned_rotation_axis_angle(random_seed, self.align)
# create the new pdb and get molecule center
# molecule center is the origin of rotation)
rotation_center = self._add_aug_pdb(variant_group, variant, 'complex',
rotation_axis, rotation_angle)
# copy the targets/features
if 'targets' in f5[variant_name]:
f5.copy(variant_name + '/targets/', variant_name)
f5.copy(variant_name + '/features/', variant_group)
# rotate the feature
self._rotate_feature(variant_group, rotation_axis, rotation_angle, rotation_center)
# store the rotation axis/angl/center as attriutes
# in case we need them later
variant_group.attrs['rotation_axis'] = rotation_axis
variant_group.attrs['rotation_angle'] = rotation_angle
variant_group.attrs['rotation_center'] = rotation_center
f5.close()
self.logger.info(
f'\n# Successfully augmented data in {self.hdf5}')
# ====================================================================================
#
# ADD FEATURES TO AN EXISTING DATASET
#
# ====================================================================================
def add_feature(self, remove_error=True, prog_bar=True, distance_cutoff=10.0):
"""Add a feature to an existing hdf5 file.
Args:
remove_error (bool): remove errored variant
prog_bar (bool, optional): use tqdm
distance_cutoff (float, optional): max distance from center to include atoms, default 10.0 Å
Example:
>>> h5file = '1ak4.hdf5'
>>>
>>> #init the data assembler
>>> database = DataGenerator(compute_features = ['deeprank.features.ResidueDensity'],
>>> hdf5=h5file)
>>>
>>> database.add_feature(remove_error=True, prog_bar=True)
"""
# check if file exists
if not os.path.isfile(self.hdf5):
raise FileNotFoundError(
'File %s does not exists' % self.hdf5)
# get the folder names
f5 = h5py.File(self.hdf5, 'a')
fnames = f5.keys()
# get the non rotated ones
fnames_original = list(
filter(lambda x: not re.search(r'_r\d+$', x), fnames))
# get the rotated ones
fnames_augmented = list(
filter(lambda x: re.search(r'_r\d+$', x), fnames))
# check feature_error
if not self.feature_error:
self.feature_error = []
# computes the features of the original
desc = '{:25s}'.format('Add features')
for cplx_name in tqdm(
fnames_original,
desc=desc,
ncols=100,
disable=not prog_bar):
# variant group
variant_group = f5[cplx_name]
variant = hdf5data.load_variant(variant_group)
error_flag = False
if self.compute_features is not None:
# the internal features
variant_group.require_group('features')
variant_group.require_group('features_raw')
error_flag = self._compute_features(self.compute_features,
self.environment,
distance_cutoff,
variant_group['features'],
variant,
self.logger)
if error_flag:
self.feature_error += [cplx_name]
# copy the data from the original to the augmented
for cplx_name in fnames_augmented:
# group of the variant
aug_variant_group = f5[cplx_name]
# get the source group
variant_name = re.split(r'_r\d+', variant_group.name)[0]
src_variant_group = f5[variant_name]
# get the rotation parameters
axis = aug_variant_group.attrs['axis']
angle = aug_variant_group.attrs['angle']
center = aug_variant_group.attrs['center']
# copy the features to the augmented
for k in variant_group['features']:
if k not in aug_variant_group['features']:
# copy
data = src_variant_group['features/' + k][()]
aug_variant_group.require_group('features')
aug_variant_group.create_dataset("features/" + k, data=data, compression='lzf', chunks=True)
# rotate
self._rotate_feature(aug_variant_group, axis, angle, center, feat_name=[k])
# find errored augmented variants
tmp_aug_error = []
for variant_name in self.feature_error:
tmp_aug_error += list(filter(lambda x: variant_name in x,
fnames_augmented))
self.feature_error += tmp_aug_error
# Remove errored variants
if self.feature_error:
if remove_error:
for variant_name in self.feature_error:
del f5[variant_name]
self.logger.info(
f'Molecules with errored features are removed:\n'
f'{self.feature_error}')
else:
self.logger.warning(
f"The following variants have errored features:\n"
f'{self.feature_error}')
# close the file
f5.close()
# ====================================================================================
#
# ADD TARGETS TO AN EXISTING DATASET
#
# ====================================================================================
def add_unique_target(self, targdict):
"""Add identical targets for all the complexes in the datafile.
This is usefull if you want to add the binary class of all the complexes
created from decoys or natives
Args:
targdict (dict): Example: {'DOCKQ':1.0}
>>> database = DataGenerator(hdf5='1ak4.hdf5')
>>> database.add_unique_target({'DOCKQ':1.0})
"""
# check if file exists
if not os.path.isfile(self.hdf5):
raise FileNotFoundError(
'File %s does not exists' % self.hdf5)
f5 = h5py.File(self.hdf5, 'a')
for variant_name in list(f5.keys()):
targrp = f5[variant_name].require_group('targets')
for name, value in targdict.items():
targrp.create_dataset(name, data=np.array([value]), chunks=True)
f5.close()
def add_target(self, prog_bar=False):
"""Add a target to an existing hdf5 file.
Args:
prog_bar (bool, optional): Use tqdm
Example:
>>> h5file = '1ak4.hdf5'
>>>
>>> #init the data assembler
>>> database = DataGenerator(compute_targets =['deeprank.targets.binary_class'],
>>> hdf5=h5file)
>>>
>>> database.add_target(prog_bar=True)
"""
# check if file exists
if not os.path.isfile(self.hdf5):
raise FileNotFoundError(
'File %s does not exists' % self.hdf5)
# name of the hdf5 file
f5 = h5py.File(self.hdf5, 'a')
# get the folder names
fnames = f5.keys()
# get the non rotated ones
fnames_original = list(
filter(lambda x: not re.search(r'_r\d+$', x), fnames))
fnames_augmented = list(
filter(lambda x: re.search(r'_r\d+$', x), fnames))
# compute the targets of the original
desc = '{:25s}'.format('Add targets')
for variant_name in tqdm(fnames_original, desc=desc,
ncols=100, disable=not prog_bar):
# group of the variant
variant_group = f5[variant_name]
# add the targets
if self.compute_targets is not None:
variant = hdf5data.load_variant(f5[variant_name])
variant_group.require_group('targets')
self._compute_targets(self.compute_targets,
variant,
variant_group['targets'])
# copy the targets of the original to the rotated
for cplx_name in fnames_augmented:
# group of the variant
aug_variant_group = f5[cplx_name]
# get the source group
variant_name = re.split(r'_r\d+', variant_group.name)[0]
src_variant_group = f5[variant_name]
# copy the targets to the augmented
for k in variant_group['targets']:
if k not in aug_variant_group['targets']:
data = src_variant_group['targets/' + k][()]
aug_variant_group.require_group('targets')
aug_variant_group.create_dataset("targets/" + k, data=data, chunks=True)
# close the file
f5.close()
def realign_complexes(self, align, compute_features=None, pssm_source=None, distance_cutoff=10.0):
"""Align all the complexes already present in the HDF5.
Arguments:
align {dict} -- alignement dictionary (see __init__)
Keyword Arguments:
compute_features {list} -- list of features to be computed
if None computes the features specified in
the attrs['features'] of the file (if present)
pssm_source {str} -- path of the pssm files. If None the source specfied in
the attrs['pssm_source'] will be used (if present) (default: {None})
distance_cutoff {float} -- max distance from the center to include atoms (default 10.0 Å)
Raises:
ValueError: If no PSSM detected
Example:
>>> database = DataGenerator(hdf5='1ak4.hdf5')
>>> # if comute_features and pssm_source are not specified
>>> # the values in hdf5.attrs['features'] and hdf5.attrs['pssm_source'] will be used
>>> database.realign_complex(align={'axis':'x'},
>>> compute_features['deeprank.features.X'],
>>> pssm_source='./1ak4_pssm/')
"""
f5 = h5py.File(self.hdf5, 'a')
variant_names = f5.keys()
self.logger.info(
f'\n# Start aligning the HDF5 database: {self.hdf5}')
# deal with the features
if self.compute_features is None:
if compute_features is None:
if 'features' in f5.attrs:
self.compute_features = list(f5.attrs['features'])
else:
self.compute_features = compute_features
# deal with the pssm source
if self.pssm_source is not None:
config.PATH_PSSM_SOURCE = self.pssm_source
elif pssm_source is not None:
config.PATH_PSSM_SOURCE = pssm_source
elif 'pssm_source' in f5.attrs:
config.PATH_PSSM_SOURCE = f5.attrs['pssm_source']
else:
raise ValueError('No pssm source detected')
# loop over the complexes
desc = '{:25s}'.format('Add features')
for variant_name in tqdm(variant_names, desc=desc, ncols=100):
variant = hdf5data.load_variant(f5[variant_name])
# align the pdb
variant_group = f5[variant_name]
pdb = variant_group['complex'][()]
sqldb = self._get_aligned_sqldb(pdb, align)
data = sqldb.sql2pdb()
data = np.array(data).astype('|S78')
variant_group['complex'][...] = data
# remove prexisting features
old_dir = ['features', 'features_raw', 'mapped_features']
for od in old_dir:
if od in variant_group:
del variant_group[od]
# the internal features
variant_group.require_group('features')
variant_group.require_group('features_raw')
# compute features
error_flag = self._compute_features(self.compute_features,
self.environment,
distance_cutoff,
variant_group['features'],
variant,
self.logger)
f5.close()
# ====================================================================================
#
# PRECOMPUTE THE GRID POINTS
#
# ====================================================================================
@staticmethod
def get_grid_center(environment, variant):
"gets the C-alpha position of the variant residue"
pdb_path = get_pdb_path(environment.pdb_root, variant.pdb_ac)
pdb = pdb2sql.pdb2sql(pdb_path)
try:
c_alpha_positions = pdb.get_xyz(chainID=variant.chain_id,
resSeq=variant.residue_number,
name="CA")
finally:
| |
'and using the estimation.'
dynSymSectionIgnore = True
# check if the size of the ".dynsym" section matches the
# estimated size
elif dynSymSection.elfN_shdr.sh_size != estimatedSymbolTableSize:
# check if forceDynSymParsing was not set (default value is 0)
if self.forceDynSymParsing == 0:
print 'WARNING: ".dynsym" size does not match the estimated ' \
+ 'size. One (or both) are wrong. Ignoring the dynamic ' \
+ ' symbols. You can force the using of the ".dynsym" ' \
+ 'section by setting "forceDynSymParsing=1" or force ' \
+ 'the using of the estimated size by setting ' \
+ '"forceDynSymParsing=2".'
# ignore dynamic symbols
dynSymSectionIgnore = True
dynSymEstimationIgnore = True
# forcing the use of the ".dynsym" section
elif self.forceDynSymParsing == 1:
dynSymSectionIgnore = False
dynSymEstimationIgnore = True
# forcing the use of the estimation
elif self.forceDynSymParsing == 2:
dynSymSectionIgnore = True
dynSymEstimationIgnore = False
# value does not exists
else:
raise TypeError('"forceDynSymParsing" uses an invalid value.')
# use ".dynsym" section information (when considered correct)
if dynSymSectionIgnore is False:
# parse the complete symbol table based on the
# ".dynsym" section
for i in range(dynSymSection.elfN_shdr.sh_size \
/ symbolEntrySize):
tempOffset = symbolTableOffset + (i*symbolEntrySize)
tempSymbol = self._parseDynamicSymbol(tempOffset,
stringTableOffset, stringTableSize)
# add entry to dynamic symbol entries list
self.dynamicSymbolEntries.append(tempSymbol)
# use estimation to parse dynamic symbols
elif (dynSymSectionIgnore is True
and dynSymEstimationIgnore is False):
# parse the complete symbol table based on the
# estimation
for i in range(estimatedSymbolTableSize \
/ symbolEntrySize):
tempOffset = symbolTableOffset + (i*symbolEntrySize)
tempSymbol = self._parseDynamicSymbol(tempOffset,
stringTableOffset, stringTableSize)
# add entry to dynamic symbol entries list
self.dynamicSymbolEntries.append(tempSymbol)
# holds tuples: (type, offset, size, targetlist)
relocTODO = []
# DT_JMPREL
if jmpRelOffset is not None:
if pltRelType is None:
raise ValueError('DT_JMPREL present but DT_PLTREL not.')
if pltRelSize is None:
raise ValueError('DT_JMPREL present but DT_PLTRELSZ not.')
if pltRelType == D_tag.DT_REL:
if relEntrySize is None:
raise ValueError('DT_JMPREL present with ' \
+ ' DT_PLTREL == DT_REL, but DT_RELSZ not present')
elif pltRelType == D_tag.DT_RELA:
if relaEntrySize is None:
raise ValueError('DT_JMPREL present with ' \
+ ' DT_PLTREL == DT_RELA, but DT_RELASZ not present')
else:
raise ValueError('Invalid/unexpected DT_PLTREL (pltRelType).')
self.jumpRelocationEntries = list()
relocTODO.append((pltRelType, jmpRelOffset, pltRelSize,
self.jumpRelocationEntries))
# DT_REL (only mandatory hwn DT_RELA is not present)
if relOffset is not None:
if relSize is None:
raise ValueError('DT_REL present but DT_RELSZ not.')
if relEntrySize is None:
raise ValueError('DT_REL present but DT_RELENT not.')
self.relocationEntries = list()
relocTODO.append((D_tag.DT_REL, relOffset, relSize,
self.relocationEntries))
# DT_RELA
if relaOffset is not None:
if relaSize is None:
raise ValueError('DT_RELA present but DT_RELASZ not.')
if relaEntrySize is None:
raise ValueError('DT_RELA present but DT_RELAENT not.')
self.relocationEntries = list()
relocTODO.append((D_tag.DT_RELA, relaOffset, relaSize,
self.relocationEntries))
if relOffset is not None and relaOffset is not None:
raise RuntimeError('INTERNAL ERROR: TODO REL READ 1')
if len(relocTODO) < 1:
raise RuntimeError('INTERNAL ERROR: TODO REL READ 2')
for relocType, relocOffset, relocSize, relocList in relocTODO:
if relocType == D_tag.DT_REL:
relocEntrySize = relEntrySize
elif relocType == D_tag.DT_RELA:
relocEntrySize = relaEntrySize
if relocType == D_tag.DT_REL:
if self.bits == 32:
structFmt = '<II'
elif self.bits == 64:
structFmt = '<QQ'
elif relocType == D_tag.DT_RELA:
if self.bits == 32:
structFmt = '<IIi'
elif self.bits == 64:
structFmt = '<QQq'
assert struct.calcsize(structFmt) == relocEntrySize
for i in range(relocSize / relocEntrySize):
tempOffset = relocOffset + i*relocEntrySize
if relocType == D_tag.DT_REL:
relocEntry = ElfN_Rel()
(
# ElfN_Addr r_offset; (N = 32/64)
# in executable and share object files
# => r_offset holds a virtual address
relocEntry.r_offset,
# ElfN_Word r_info; (N = 32/64)
relocEntry.r_info,
) = struct.unpack(structFmt, self.data[tempOffset:tempOffset+relocEntrySize])
elif relocType == D_tag.DT_RELA:
relocEntry = ElfN_Rela()
(
# ElfN_Addr r_offset; (N = 32/64)
# in executable and share object files
# => r_offset holds a virtual address
relocEntry.r_offset,
# ElfN_Word r_info; (N = 32/64)
relocEntry.r_info,
relocEntry.r_addend,
) = struct.unpack(structFmt, self.data[tempOffset:tempOffset+relocEntrySize])
del tempOffset
(relocEntry.r_sym, relocEntry.r_type) = \
self.relocationSymIdxAndTypeFromInfo(relocEntry.r_info)
# get values from the symbol table
tempOffset = symbolTableOffset \
+ (relocEntry.r_sym*symbolEntrySize)
tempSymbol = self._parseDynamicSymbol(tempOffset,
stringTableOffset, stringTableSize)
# check if parsed dynamic symbol already exists
# if it does => use already existed dynamic symbol
# else => use newly parsed dynamic symbol
dynamicSymbolFound = False
for dynamicSymbol in self.dynamicSymbolEntries:
if (tempSymbol.ElfN_Sym.st_name
== dynamicSymbol.ElfN_Sym.st_name
and tempSymbol.ElfN_Sym.st_value
== dynamicSymbol.ElfN_Sym.st_value
and tempSymbol.ElfN_Sym.st_size
== dynamicSymbol.ElfN_Sym.st_size
and tempSymbol.ElfN_Sym.st_info
== dynamicSymbol.ElfN_Sym.st_info
and tempSymbol.ElfN_Sym.st_other
== dynamicSymbol.ElfN_Sym.st_other
and tempSymbol.ElfN_Sym.st_shndx
== dynamicSymbol.ElfN_Sym.st_shndx):
relocEntry.symbol = dynamicSymbol
dynamicSymbolFound = True
break
if dynamicSymbolFound is False:
relocEntry.symbol = tempSymbol
relocList.append(relocEntry)
# this function dumps a list of relocations (used in printElf())
# return values: None
    def printRelocations(self, relocationList, title):
        """
        Dump a human-readable table of relocation entries (used by printElf()).

        relocationList: list of ElfN_Rel / ElfN_Rela entries
        title: caption printed above the table

        Python 2 print statements: a trailing comma suppresses the newline,
        so consecutive prints build one output row; a bare 'print' ends it.
        """
        # addend column is shown only for ElfN_Rela lists, detected from the
        # first element — assumes the list is homogeneous
        printAddend = len(relocationList) \
            and type(relocationList[0]) == ElfN_Rela

        # output all jump relocation entries
        print("%s (%d entries)" % (title, len(relocationList)))
        # first header row: column titles
        print("No."),
        print("\t"),
        print("MemAddr"),
        print("\t"),
        print("File offset"),
        print("\t"),
        print("Info"),
        print("\t\t"),
        print("Type"),
        print("\t\t"),
        if printAddend:
            print("Addend"),
            print("\t\t"),
        print("Sym. value"),
        print("\t"),
        print("Sym. name"),
        print
        # second header row: underlying ELF struct field names
        print("\t"),
        print("(r_offset)"),
        print("\t"),
        print("\t"),
        print("\t"),
        print("(r_info)"),
        print("\t"),
        print("(r_type)"),
        if printAddend:
            print("\t"),
            print("(r_addend)"),
        print
        counter = 0
        for entry in relocationList:
            symbol = entry.symbol.ElfN_Sym
            print("%d" % counter),
            print("\t"),
            print("0x" + ("%x" % entry.r_offset).zfill(8)),
            print("\t"),
            # try to convert the virtual memory address to a file offset
            # in executable and share object files
            # => r_offset holds a virtual address
            # NOTE(review): bare except — any failure prints "None"
            try:
                print("0x" + ("%x" \
                    % self.virtualMemoryAddrToFileOffset(
                    entry.r_offset)).zfill(8)),
            except:
                print("None\t"),
            print("\t"),
            print("0x" + ("%x" % entry.r_info).zfill(8)),
            print("\t"),
            # translate type to its symbolic name when known
            if entry.r_type in R_type.reverse_lookup.keys():
                print("%s" % R_type.reverse_lookup[entry.r_type]),
            else:
                print("0x%x" % entry.r_type),
            if printAddend:
                if type(entry) == ElfN_Rela:
                    print("\t"),
                    print("0x" + ("%x" % entry.r_addend).zfill(8)),
                else:
                    # pad the column for a non-Rela entry in a Rela table
                    print("\t\t"),
            print("\t"),
            print("0x" + ("%x" % symbol.st_value).zfill(8)),
            print("\t"),
            print(entry.symbol.symbolName),
            print
            counter += 1
        print
# this function outputs the parsed ELF file (like readelf)
# return values: None
def printElf(self):
# check if the file was completely parsed before
if self.fileParsed is False:
raise ValueError("Operation not possible. " \
+ "File was not completely parsed before.")
# output header
print "ELF header:"
print "Type: %s" % ElfN_Ehdr.E_type.reverse_lookup[self.header.e_type]
print "Version: %s" \
% ElfN_Ehdr.EI_VERSION.reverse_lookup[self.header.e_ident[6]]
print "Machine: %s" \
% ElfN_Ehdr.E_machine.reverse_lookup[self.header.e_machine]
print "Entry point address: 0x%x" % self.header.e_entry
print "Program header table offset in bytes: 0x%x (%d)" \
% (self.header.e_phoff, self.header.e_phoff)
print "Section header table offset in bytes: 0x%x (%d)" \
% (self.header.e_shoff, self.header.e_shoff)
print "Flags: 0x%x (%d)" % (self.header.e_flags, self.header.e_flags)
print "Size of ELF header in bytes: 0x%x (%d)" \
% (self.header.e_ehsize, self.header.e_ehsize)
print "Size of each program header entry in bytes: 0x%x (%d)" \
% (self.header.e_phentsize, self.header.e_phentsize)
print "Number of program header entries: %d" % self.header.e_phnum
print "Size of each sections header entry in bytes: 0x%x (%d)" \
% (self.header.e_shentsize, self.header.e_shentsize)
print "Number of section header entries: %d" % self.header.e_shnum
print "Section header string table index: %d" % self.header.e_shstrndx
print
# output of all sections
counter = 0
for section in self.sections:
print "Section No. %d" % counter
print "Name: %s" % section.sectionName
# translate type
if section.elfN_shdr.sh_type in SH_type.reverse_lookup.keys():
print "Type: %s" \
% SH_type.reverse_lookup[section.elfN_shdr.sh_type]
else:
print "Unknown Type: 0x%x (%d)" \
% (section.elfN_shdr.sh_type, section.elfN_shdr.sh_type)
print "Addr: 0x%x" % section.elfN_shdr.sh_addr
print "Off: 0x%x" % section.elfN_shdr.sh_offset
print "Size: 0x%x (%d)" \
% (section.elfN_shdr.sh_size, section.elfN_shdr.sh_size)
print "ES: %d" % section.elfN_shdr.sh_entsize
# translate flags
temp = ""
if (section.elfN_shdr.sh_flags & SH_flags.SHF_WRITE) != 0:
temp += "W"
if (section.elfN_shdr.sh_flags & SH_flags.SHF_ALLOC) != 0:
temp += "A"
if (section.elfN_shdr.sh_flags & SH_flags.SHF_EXECINSTR) != 0:
temp += "X"
print "FLG: %s" % temp
print "Lk: %d" % section.elfN_shdr.sh_link
print "Inf: %d" % section.elfN_shdr.sh_info
print "Al: %d" % section.elfN_shdr.sh_addralign
print
counter += 1
# output of all segments
counter = 0
for segment in self.segments:
print "Segment No. %d" % counter
# translate type
if segment.elfN_Phdr.p_type in P_type.reverse_lookup.keys():
print "Type: %s" \
% P_type.reverse_lookup[segment.elfN_Phdr.p_type]
else:
print "Unknown Type: 0x%x (%d)" \
% (segment.elfN_Phdr.p_type, segment.elfN_Phdr.p_type)
print "Offset: 0x%x" % segment.elfN_Phdr.p_offset
print "Virtual Addr: 0x%x" % segment.elfN_Phdr.p_vaddr
print "Physical Addr: 0x%x" % segment.elfN_Phdr.p_paddr
print "File Size: 0x%x (%d)" \
% (segment.elfN_Phdr.p_filesz, segment.elfN_Phdr.p_filesz)
print "Mem Size: 0x%x (%d)" \
% (segment.elfN_Phdr.p_memsz, segment.elfN_Phdr.p_memsz)
# translate flags
temp = ""
if (segment.elfN_Phdr.p_flags & P_flags.PF_R) != 0:
temp += "R"
if (segment.elfN_Phdr.p_flags & P_flags.PF_W) != | |
= _adfrcc.cvar.Element_Np
# --- SWIG-generated bindings (auto-generated; do not edit by hand) ---
# Chemical-element constants re-exported from the C extension's globals.
Element.Pu = _adfrcc.cvar.Element_Pu
Element.Am = _adfrcc.cvar.Element_Am
Element.Cm = _adfrcc.cvar.Element_Cm
Element.Bk = _adfrcc.cvar.Element_Bk
Element.Cf = _adfrcc.cvar.Element_Cf
Element.Es = _adfrcc.cvar.Element_Es
Element.Fm = _adfrcc.cvar.Element_Fm
Element.Md = _adfrcc.cvar.Element_Md
Element.No = _adfrcc.cvar.Element_No
Element.Lr = _adfrcc.cvar.Element_Lr
Element.Rf = _adfrcc.cvar.Element_Rf
Element.Db = _adfrcc.cvar.Element_Db
Element.Sg = _adfrcc.cvar.Element_Sg
Element.Bh = _adfrcc.cvar.Element_Bh
Element.Hs = _adfrcc.cvar.Element_Hs
Element.Mt = _adfrcc.cvar.Element_Mt
Element.Ds = _adfrcc.cvar.Element_Ds
Element.Rg = _adfrcc.cvar.Element_Rg
Element.Cn = _adfrcc.cvar.Element_Cn

# Hydrogen-bond acceptor type constants (HBA_*).
_adfrcc.HBA_NON_swigconstant(_adfrcc)
HBA_NON = _adfrcc.HBA_NON

_adfrcc.HBA_SPHERE_swigconstant(_adfrcc)
HBA_SPHERE = _adfrcc.HBA_SPHERE

_adfrcc.HBA_DIR1_swigconstant(_adfrcc)
HBA_DIR1 = _adfrcc.HBA_DIR1

_adfrcc.HBA_DIR2_swigconstant(_adfrcc)
HBA_DIR2 = _adfrcc.HBA_DIR2

# Acceptor helper functions. SWIG emits a Python wrapper def and then
# immediately shadows it with the direct C binding on the next assignment.
def isAcceptor(acceptorType):
    return _adfrcc.isAcceptor(acceptorType)
isAcceptor = _adfrcc.isAcceptor

def isDirectionalAcceptor(acceptorType):
    return _adfrcc.isDirectionalAcceptor(acceptorType)
isDirectionalAcceptor = _adfrcc.isDirectionalAcceptor

def numAcceptorDirs(acceptorType):
    return _adfrcc.numAcceptorDirs(acceptorType)
numAcceptorDirs = _adfrcc.numAcceptorDirs

# NOTE: the generated parameter name 'str' shadows the builtin
def parseAcceptorType(acceptorType, str):
    return _adfrcc.parseAcceptorType(acceptorType, str)
parseAcceptorType = _adfrcc.parseAcceptorType

def acceptorTypeDebugNameStr(acceptorType):
    return _adfrcc.acceptorTypeDebugNameStr(acceptorType)
acceptorTypeDebugNameStr = _adfrcc.acceptorTypeDebugNameStr

# Hydrogen-bond donor type constants (HBD_*).
_adfrcc.HBD_NON_swigconstant(_adfrcc)
HBD_NON = _adfrcc.HBD_NON

_adfrcc.HBD_SPHERE_swigconstant(_adfrcc)
HBD_SPHERE = _adfrcc.HBD_SPHERE

_adfrcc.HBD_DIR_swigconstant(_adfrcc)
HBD_DIR = _adfrcc.HBD_DIR

# Donor helper functions (same def-then-rebind pattern as above).
def isDonor(donorType):
    return _adfrcc.isDonor(donorType)
isDonor = _adfrcc.isDonor

def isDirectionalDonor(donorType):
    return _adfrcc.isDirectionalDonor(donorType)
isDirectionalDonor = _adfrcc.isDirectionalDonor

# NOTE: the generated parameter name 'str' shadows the builtin
def parseDonorType(donorType, str):
    return _adfrcc.parseDonorType(donorType, str)
parseDonorType = _adfrcc.parseDonorType

def donorTypeDebugNameStr(donorType):
    return _adfrcc.donorTypeDebugNameStr(donorType)
donorTypeDebugNameStr = _adfrcc.donorTypeDebugNameStr
class AtomType(_object):
    """SWIG proxy for the C++ AtomType force-field parameter record.

    All attribute reads/writes are routed through the `_adfrcc` extension via
    the `__swig_setmethods__` / `__swig_getmethods__` tables populated below.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, AtomType, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, AtomType, name)
    __repr__ = _swig_repr
    def __init__(self):
        # Standard SWIG ownership dance: append to an existing `this` list if
        # present, otherwise bind the new C++ pointer directly.
        this = _adfrcc.new_AtomType()
        try:
            self.this.append(this)
        except:
            self.this = this
    __swig_destroy__ = _adfrcc.delete_AtomType
    __del__ = lambda self: None
    def registerOverride(self, otherAtomTypeIndex, equilibriumDist, wellDepth, energyPowN, energyPowM, hbCosAlphaPow, hbCosBetaPow):
        """Register pairwise parameter overrides against another atom type (delegates to C++)."""
        return _adfrcc.AtomType_registerOverride(self, otherAtomTypeIndex, equilibriumDist, wellDepth, energyPowN, energyPowM, hbCosAlphaPow, hbCosBetaPow)
    def finalizeParameters(self):
        """Finalize derived parameters in the C++ object (delegates to C++)."""
        return _adfrcc.AtomType_finalizeParameters(self)
    def __eq__(self, other):
        return _adfrcc.AtomType___eq__(self, other)
    def __ne__(self, other):
        return _adfrcc.AtomType___ne__(self, other)
    def hbondsWith(self, other):
        """Return whether this atom type hydrogen-bonds with `other` (delegates to C++)."""
        return _adfrcc.AtomType_hbondsWith(self, other)
    def debugNameStr(self):
        return _adfrcc.AtomType_debugNameStr(self)
    def printDebugDescription(self):
        return _adfrcc.AtomType_printDebugDescription(self)
    # --- SWIG-generated attribute plumbing: each C++ field gets a setter/getter
    # --- table entry and, under new-style classes, a read/write property.
    __swig_setmethods__["atomTypeName"] = _adfrcc.AtomType_atomTypeName_set
    __swig_getmethods__["atomTypeName"] = _adfrcc.AtomType_atomTypeName_get
    if _newclass:
        atomTypeName = _swig_property(_adfrcc.AtomType_atomTypeName_get, _adfrcc.AtomType_atomTypeName_set)
    __swig_setmethods__["atomTypeIndex"] = _adfrcc.AtomType_atomTypeIndex_set
    __swig_getmethods__["atomTypeIndex"] = _adfrcc.AtomType_atomTypeIndex_get
    if _newclass:
        atomTypeIndex = _swig_property(_adfrcc.AtomType_atomTypeIndex_get, _adfrcc.AtomType_atomTypeIndex_set)
    __swig_setmethods__["Rii"] = _adfrcc.AtomType_Rii_set
    __swig_getmethods__["Rii"] = _adfrcc.AtomType_Rii_get
    if _newclass:
        Rii = _swig_property(_adfrcc.AtomType_Rii_get, _adfrcc.AtomType_Rii_set)
    __swig_setmethods__["epsii"] = _adfrcc.AtomType_epsii_set
    __swig_getmethods__["epsii"] = _adfrcc.AtomType_epsii_get
    if _newclass:
        epsii = _swig_property(_adfrcc.AtomType_epsii_get, _adfrcc.AtomType_epsii_set)
    __swig_setmethods__["solvVolume"] = _adfrcc.AtomType_solvVolume_set
    __swig_getmethods__["solvVolume"] = _adfrcc.AtomType_solvVolume_get
    if _newclass:
        solvVolume = _swig_property(_adfrcc.AtomType_solvVolume_get, _adfrcc.AtomType_solvVolume_set)
    __swig_setmethods__["solvParameter"] = _adfrcc.AtomType_solvParameter_set
    __swig_getmethods__["solvParameter"] = _adfrcc.AtomType_solvParameter_get
    if _newclass:
        solvParameter = _swig_property(_adfrcc.AtomType_solvParameter_get, _adfrcc.AtomType_solvParameter_set)
    __swig_setmethods__["RijHb"] = _adfrcc.AtomType_RijHb_set
    __swig_getmethods__["RijHb"] = _adfrcc.AtomType_RijHb_get
    if _newclass:
        RijHb = _swig_property(_adfrcc.AtomType_RijHb_get, _adfrcc.AtomType_RijHb_set)
    __swig_setmethods__["epsijHb"] = _adfrcc.AtomType_epsijHb_set
    __swig_getmethods__["epsijHb"] = _adfrcc.AtomType_epsijHb_get
    if _newclass:
        epsijHb = _swig_property(_adfrcc.AtomType_epsijHb_get, _adfrcc.AtomType_epsijHb_set)
    __swig_setmethods__["hbDonorType"] = _adfrcc.AtomType_hbDonorType_set
    __swig_getmethods__["hbDonorType"] = _adfrcc.AtomType_hbDonorType_get
    if _newclass:
        hbDonorType = _swig_property(_adfrcc.AtomType_hbDonorType_get, _adfrcc.AtomType_hbDonorType_set)
    __swig_setmethods__["hbAcceptorType"] = _adfrcc.AtomType_hbAcceptorType_set
    __swig_getmethods__["hbAcceptorType"] = _adfrcc.AtomType_hbAcceptorType_get
    if _newclass:
        hbAcceptorType = _swig_property(_adfrcc.AtomType_hbAcceptorType_get, _adfrcc.AtomType_hbAcceptorType_set)
    __swig_setmethods__["canHbond"] = _adfrcc.AtomType_canHbond_set
    __swig_getmethods__["canHbond"] = _adfrcc.AtomType_canHbond_get
    if _newclass:
        canHbond = _swig_property(_adfrcc.AtomType_canHbond_get, _adfrcc.AtomType_canHbond_set)
    __swig_setmethods__["isHbDonor"] = _adfrcc.AtomType_isHbDonor_set
    __swig_getmethods__["isHbDonor"] = _adfrcc.AtomType_isHbDonor_get
    if _newclass:
        isHbDonor = _swig_property(_adfrcc.AtomType_isHbDonor_get, _adfrcc.AtomType_isHbDonor_set)
    __swig_setmethods__["isHbDonorNonH"] = _adfrcc.AtomType_isHbDonorNonH_set
    __swig_getmethods__["isHbDonorNonH"] = _adfrcc.AtomType_isHbDonorNonH_get
    if _newclass:
        isHbDonorNonH = _swig_property(_adfrcc.AtomType_isHbDonorNonH_get, _adfrcc.AtomType_isHbDonorNonH_set)
    __swig_setmethods__["isHbDirectionalDonor"] = _adfrcc.AtomType_isHbDirectionalDonor_set
    __swig_getmethods__["isHbDirectionalDonor"] = _adfrcc.AtomType_isHbDirectionalDonor_get
    if _newclass:
        isHbDirectionalDonor = _swig_property(_adfrcc.AtomType_isHbDirectionalDonor_get, _adfrcc.AtomType_isHbDirectionalDonor_set)
    __swig_setmethods__["isHbDirectionalDonorNonH"] = _adfrcc.AtomType_isHbDirectionalDonorNonH_set
    __swig_getmethods__["isHbDirectionalDonorNonH"] = _adfrcc.AtomType_isHbDirectionalDonorNonH_get
    if _newclass:
        isHbDirectionalDonorNonH = _swig_property(_adfrcc.AtomType_isHbDirectionalDonorNonH_get, _adfrcc.AtomType_isHbDirectionalDonorNonH_set)
    __swig_setmethods__["isHbAcceptor"] = _adfrcc.AtomType_isHbAcceptor_set
    __swig_getmethods__["isHbAcceptor"] = _adfrcc.AtomType_isHbAcceptor_get
    if _newclass:
        isHbAcceptor = _swig_property(_adfrcc.AtomType_isHbAcceptor_get, _adfrcc.AtomType_isHbAcceptor_set)
    __swig_setmethods__["isHbDirectionalAcceptor"] = _adfrcc.AtomType_isHbDirectionalAcceptor_set
    __swig_getmethods__["isHbDirectionalAcceptor"] = _adfrcc.AtomType_isHbDirectionalAcceptor_get
    if _newclass:
        isHbDirectionalAcceptor = _swig_property(_adfrcc.AtomType_isHbDirectionalAcceptor_get, _adfrcc.AtomType_isHbDirectionalAcceptor_set)
    __swig_setmethods__["numAcceptorDirs"] = _adfrcc.AtomType_numAcceptorDirs_set
    __swig_getmethods__["numAcceptorDirs"] = _adfrcc.AtomType_numAcceptorDirs_get
    if _newclass:
        numAcceptorDirs = _swig_property(_adfrcc.AtomType_numAcceptorDirs_get, _adfrcc.AtomType_numAcceptorDirs_set)
    __swig_setmethods__["bondIndex"] = _adfrcc.AtomType_bondIndex_set
    __swig_getmethods__["bondIndex"] = _adfrcc.AtomType_bondIndex_get
    if _newclass:
        bondIndex = _swig_property(_adfrcc.AtomType_bondIndex_get, _adfrcc.AtomType_bondIndex_set)
    __swig_setmethods__["includeInScoring"] = _adfrcc.AtomType_includeInScoring_set
    __swig_getmethods__["includeInScoring"] = _adfrcc.AtomType_includeInScoring_get
    if _newclass:
        includeInScoring = _swig_property(_adfrcc.AtomType_includeInScoring_get, _adfrcc.AtomType_includeInScoring_set)
    __swig_setmethods__["element"] = _adfrcc.AtomType_element_set
    __swig_getmethods__["element"] = _adfrcc.AtomType_element_get
    if _newclass:
        element = _swig_property(_adfrcc.AtomType_element_get, _adfrcc.AtomType_element_set)
    __swig_setmethods__["isHydrogen"] = _adfrcc.AtomType_isHydrogen_set
    __swig_getmethods__["isHydrogen"] = _adfrcc.AtomType_isHydrogen_get
    if _newclass:
        isHydrogen = _swig_property(_adfrcc.AtomType_isHydrogen_get, _adfrcc.AtomType_isHydrogen_set)
    __swig_setmethods__["numAtomTypes"] = _adfrcc.AtomType_numAtomTypes_set
    __swig_getmethods__["numAtomTypes"] = _adfrcc.AtomType_numAtomTypes_get
    if _newclass:
        numAtomTypes = _swig_property(_adfrcc.AtomType_numAtomTypes_get, _adfrcc.AtomType_numAtomTypes_set)
    __swig_setmethods__["isHbondingPair"] = _adfrcc.AtomType_isHbondingPair_set
    __swig_getmethods__["isHbondingPair"] = _adfrcc.AtomType_isHbondingPair_get
    if _newclass:
        isHbondingPair = _swig_property(_adfrcc.AtomType_isHbondingPair_get, _adfrcc.AtomType_isHbondingPair_set)
    __swig_setmethods__["equilibriumDist"] = _adfrcc.AtomType_equilibriumDist_set
    __swig_getmethods__["equilibriumDist"] = _adfrcc.AtomType_equilibriumDist_get
    if _newclass:
        equilibriumDist = _swig_property(_adfrcc.AtomType_equilibriumDist_get, _adfrcc.AtomType_equilibriumDist_set)
    __swig_setmethods__["wellDepth"] = _adfrcc.AtomType_wellDepth_set
    __swig_getmethods__["wellDepth"] = _adfrcc.AtomType_wellDepth_get
    if _newclass:
        wellDepth = _swig_property(_adfrcc.AtomType_wellDepth_get, _adfrcc.AtomType_wellDepth_set)
    __swig_setmethods__["energyPowN"] = _adfrcc.AtomType_energyPowN_set
    __swig_getmethods__["energyPowN"] = _adfrcc.AtomType_energyPowN_get
    if _newclass:
        energyPowN = _swig_property(_adfrcc.AtomType_energyPowN_get, _adfrcc.AtomType_energyPowN_set)
    __swig_setmethods__["energyPowM"] = _adfrcc.AtomType_energyPowM_set
    __swig_getmethods__["energyPowM"] = _adfrcc.AtomType_energyPowM_get
    if _newclass:
        energyPowM = _swig_property(_adfrcc.AtomType_energyPowM_get, _adfrcc.AtomType_energyPowM_set)
    __swig_setmethods__["energyFactorN"] = _adfrcc.AtomType_energyFactorN_set
    __swig_getmethods__["energyFactorN"] = _adfrcc.AtomType_energyFactorN_get
    if _newclass:
        energyFactorN = _swig_property(_adfrcc.AtomType_energyFactorN_get, _adfrcc.AtomType_energyFactorN_set)
    __swig_setmethods__["energyFactorM"] = _adfrcc.AtomType_energyFactorM_set
    __swig_getmethods__["energyFactorM"] = _adfrcc.AtomType_energyFactorM_get
    if _newclass:
        energyFactorM = _swig_property(_adfrcc.AtomType_energyFactorM_get, _adfrcc.AtomType_energyFactorM_set)
    __swig_setmethods__["hbCosAlphaPow"] = _adfrcc.AtomType_hbCosAlphaPow_set
    __swig_getmethods__["hbCosAlphaPow"] = _adfrcc.AtomType_hbCosAlphaPow_get
    if _newclass:
        hbCosAlphaPow = _swig_property(_adfrcc.AtomType_hbCosAlphaPow_get, _adfrcc.AtomType_hbCosAlphaPow_set)
    __swig_setmethods__["hbCosBetaPow"] = _adfrcc.AtomType_hbCosBetaPow_set
    __swig_getmethods__["hbCosBetaPow"] = _adfrcc.AtomType_hbCosBetaPow_get
    if _newclass:
        hbCosBetaPow = _swig_property(_adfrcc.AtomType_hbCosBetaPow_get, _adfrcc.AtomType_hbCosBetaPow_set)
    __swig_setmethods__["hbWorstAngleCosToAlphaPow"] = _adfrcc.AtomType_hbWorstAngleCosToAlphaPow_set
    __swig_getmethods__["hbWorstAngleCosToAlphaPow"] = _adfrcc.AtomType_hbWorstAngleCosToAlphaPow_get
    if _newclass:
        hbWorstAngleCosToAlphaPow = _swig_property(_adfrcc.AtomType_hbWorstAngleCosToAlphaPow_get, _adfrcc.AtomType_hbWorstAngleCosToAlphaPow_set)
    __swig_setmethods__["hbWorstAngleCosToBetaPow"] = _adfrcc.AtomType_hbWorstAngleCosToBetaPow_set
    __swig_getmethods__["hbWorstAngleCosToBetaPow"] = _adfrcc.AtomType_hbWorstAngleCosToBetaPow_get
    if _newclass:
        hbWorstAngleCosToBetaPow = _swig_property(_adfrcc.AtomType_hbWorstAngleCosToBetaPow_get, _adfrcc.AtomType_hbWorstAngleCosToBetaPow_set)
# Register the proxy class with the extension's type system.
AtomType_swigregister = _adfrcc.AtomType_swigregister
AtomType_swigregister(AtomType)
class Randomize(_object):
    """SWIG proxy exposing the C++ Randomize static RNG helpers."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, Randomize, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, Randomize, name)
    __repr__ = _swig_repr
    # Static member plumbing: a getter-table entry plus (under new-style
    # classes) a staticmethod bound straight to the C function.
    __swig_getmethods__["setRandSeed"] = lambda x: _adfrcc.Randomize_setRandSeed
    if _newclass:
        setRandSeed = staticmethod(_adfrcc.Randomize_setRandSeed)
    __swig_getmethods__["randFloatBetween"] = lambda x: _adfrcc.Randomize_randFloatBetween
    if _newclass:
        randFloatBetween = staticmethod(_adfrcc.Randomize_randFloatBetween)
    __swig_getmethods__["randIntLessThan"] = lambda x: _adfrcc.Randomize_randIntLessThan
    if _newclass:
        randIntLessThan = staticmethod(_adfrcc.Randomize_randIntLessThan)
    __swig_getmethods__["randGaussian"] = lambda x: _adfrcc.Randomize_randGaussian
    if _newclass:
        randGaussian = staticmethod(_adfrcc.Randomize_randGaussian)
    def __init__(self):
        # Standard SWIG ownership dance.
        this = _adfrcc.new_Randomize()
        try:
            self.this.append(this)
        except:
            self.this = this
    __swig_destroy__ = _adfrcc.delete_Randomize
    __del__ = lambda self: None
# Register the proxy class with the extension's type system.
Randomize_swigregister = _adfrcc.Randomize_swigregister
Randomize_swigregister(Randomize)
def Randomize_setRandSeed(seed=-1):
    """SWIG shim: seed the C++ RNG (-1 default; presumably a sentinel for auto-seeding -- TODO confirm)."""
    return _adfrcc.Randomize_setRandSeed(seed)
# SWIG rebinds each wrapper name directly to the raw C function.
Randomize_setRandSeed = _adfrcc.Randomize_setRandSeed
def Randomize_randFloatBetween(low, high):
    """SWIG shim: random float between `low` and `high` (delegates to C++)."""
    return _adfrcc.Randomize_randFloatBetween(low, high)
Randomize_randFloatBetween = _adfrcc.Randomize_randFloatBetween
def Randomize_randIntLessThan(lessThan):
    """SWIG shim: random non-negative int below `lessThan` -- presumably; delegates to C++."""
    return _adfrcc.Randomize_randIntLessThan(lessThan)
Randomize_randIntLessThan = _adfrcc.Randomize_randIntLessThan
def Randomize_randGaussian(mean, stdDev):
    """SWIG shim: Gaussian draw with the given mean and standard deviation (delegates to C++)."""
    return _adfrcc.Randomize_randGaussian(mean, stdDev)
Randomize_randGaussian = _adfrcc.Randomize_randGaussian
class NamedObject(_object):
    """SWIG proxy for a simple C++ base object carrying a settable name."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, NamedObject, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, NamedObject, name)
    __repr__ = _swig_repr
    def __init__(self):
        # Standard SWIG ownership dance.
        this = _adfrcc.new_NamedObject()
        try:
            self.this.append(this)
        except:
            self.this = this
    __swig_destroy__ = _adfrcc.delete_NamedObject
    __del__ = lambda self: None
    def setName(self, aName):
        """Set the object's name (delegates to C++)."""
        return _adfrcc.NamedObject_setName(self, aName)
    def getName(self):
        """Return the object's name (delegates to C++)."""
        return _adfrcc.NamedObject_getName(self)
# Register the proxy class with the extension's type system.
NamedObject_swigregister = _adfrcc.NamedObject_swigregister
NamedObject_swigregister(NamedObject)
class Parameters(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, Parameters, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, Parameters, name)
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined")
__repr__ = _swig_repr
__swig_getmethods__["getParameters"] = lambda x: _adfrcc.Parameters_getParameters
if _newclass:
getParameters = staticmethod(_adfrcc.Parameters_getParameters)
__swig_destroy__ = _adfrcc.delete_Parameters
__del__ = lambda self: None
def loadDefaults(self):
return _adfrcc.Parameters_loadDefaults(self)
def loadFromDatFile(self, path):
return _adfrcc.Parameters_loadFromDatFile(self, path)
def loadMultiLineText(self, text):
return _adfrcc.Parameters_loadMultiLineText(self, text)
def clearAll(self):
return _adfrcc.Parameters_clearAll(self)
def setMaxDistVdw(self, aMaxDistVdw):
return _adfrcc.Parameters_setMaxDistVdw(self, aMaxDistVdw)
def setMaxDistHb(self, aMaxDistHb):
return _adfrcc.Parameters_setMaxDistHb(self, aMaxDistHb)
def setMaxDistEstat(self, aMaxDistEstat):
return _adfrcc.Parameters_setMaxDistEstat(self, aMaxDistEstat)
def setMaxDistSolv(self, aMaxDistSolv):
return _adfrcc.Parameters_setMaxDistSolv(self, aMaxDistSolv)
def getMaxDistVdw(self):
return _adfrcc.Parameters_getMaxDistVdw(self)
def getMaxDistHb(self):
return _adfrcc.Parameters_getMaxDistHb(self)
def getMaxDistEstat(self):
return _adfrcc.Parameters_getMaxDistEstat(self)
def getMaxDistSolv(self):
return _adfrcc.Parameters_getMaxDistSolv(self)
def setUseTables(self, aUseTables):
return _adfrcc.Parameters_setUseTables(self, aUseTables)
def setTablesInterpolate(self, aTablesInterpolate):
return _adfrcc.Parameters_setTablesInterpolate(self, aTablesInterpolate)
def setTablesIncrement(self, aTablesIncrement):
return _adfrcc.Parameters_setTablesIncrement(self, aTablesIncrement)
def setTablesMaxVdw(self, aTablesMaxVdw):
return _adfrcc.Parameters_setTablesMaxVdw(self, aTablesMaxVdw)
def setTablesMaxEstat(self, aTablesMaxEstat):
return _adfrcc.Parameters_setTablesMaxEstat(self, aTablesMaxEstat)
def setTablesMaxSolv(self, aTablesMaxSolv):
return _adfrcc.Parameters_setTablesMaxSolv(self, aTablesMaxSolv)
def getUseTables(self):
return _adfrcc.Parameters_getUseTables(self)
def getTablesInterpolate(self):
return _adfrcc.Parameters_getTablesInterpolate(self)
def getTablesIncrement(self):
return _adfrcc.Parameters_getTablesIncrement(self)
def getTablesMaxVdw(self):
return _adfrcc.Parameters_getTablesMaxVdw(self)
def getTablesMaxEstat(self):
return _adfrcc.Parameters_getTablesMaxEstat(self)
def getTablesMaxSolv(self):
return _adfrcc.Parameters_getTablesMaxSolv(self)
def getAtomTypeByIndex(self, index):
return _adfrcc.Parameters_getAtomTypeByIndex(self, index)
def getAtomTypeByName(self, name):
return _adfrcc.Parameters_getAtomTypeByName(self, name)
def printDebugDescription(self):
return _adfrcc.Parameters_printDebugDescription(self)
__swig_setmethods__["feCoeffVdw"] = _adfrcc.Parameters_feCoeffVdw_set
__swig_getmethods__["feCoeffVdw"] = _adfrcc.Parameters_feCoeffVdw_get
if _newclass:
feCoeffVdw = _swig_property(_adfrcc.Parameters_feCoeffVdw_get, _adfrcc.Parameters_feCoeffVdw_set)
__swig_setmethods__["feCoeffHbond"] = _adfrcc.Parameters_feCoeffHbond_set
__swig_getmethods__["feCoeffHbond"] = _adfrcc.Parameters_feCoeffHbond_get
if _newclass:
feCoeffHbond = _swig_property(_adfrcc.Parameters_feCoeffHbond_get, _adfrcc.Parameters_feCoeffHbond_set)
__swig_setmethods__["feCoeffEstat"] = _adfrcc.Parameters_feCoeffEstat_set
__swig_getmethods__["feCoeffEstat"] = _adfrcc.Parameters_feCoeffEstat_get
if _newclass:
feCoeffEstat = _swig_property(_adfrcc.Parameters_feCoeffEstat_get, _adfrcc.Parameters_feCoeffEstat_set)
__swig_setmethods__["feCoeffDesolv"] = _adfrcc.Parameters_feCoeffDesolv_set
__swig_getmethods__["feCoeffDesolv"] = _adfrcc.Parameters_feCoeffDesolv_get
if _newclass:
feCoeffDesolv = _swig_property(_adfrcc.Parameters_feCoeffDesolv_get, _adfrcc.Parameters_feCoeffDesolv_set)
__swig_setmethods__["feCoeffTors"] = _adfrcc.Parameters_feCoeffTors_set
__swig_getmethods__["feCoeffTors"] = _adfrcc.Parameters_feCoeffTors_get
if _newclass:
feCoeffTors = _swig_property(_adfrcc.Parameters_feCoeffTors_get, _adfrcc.Parameters_feCoeffTors_set)
__swig_setmethods__["vdwN"] = _adfrcc.Parameters_vdwN_set
__swig_getmethods__["vdwN"] = _adfrcc.Parameters_vdwN_get
if _newclass:
vdwN = _swig_property(_adfrcc.Parameters_vdwN_get, _adfrcc.Parameters_vdwN_set)
__swig_setmethods__["vdwM"] = _adfrcc.Parameters_vdwM_set
__swig_getmethods__["vdwM"] = _adfrcc.Parameters_vdwM_get
if _newclass:
vdwM = _swig_property(_adfrcc.Parameters_vdwM_get, _adfrcc.Parameters_vdwM_set)
__swig_setmethods__["hbN"] = _adfrcc.Parameters_hbN_set
__swig_getmethods__["hbN"] = _adfrcc.Parameters_hbN_get
if _newclass:
hbN = _swig_property(_adfrcc.Parameters_hbN_get, _adfrcc.Parameters_hbN_set)
__swig_setmethods__["hbM"] = _adfrcc.Parameters_hbM_set
__swig_getmethods__["hbM"] = _adfrcc.Parameters_hbM_get
if _newclass:
hbM = _swig_property(_adfrcc.Parameters_hbM_get, _adfrcc.Parameters_hbM_set)
__swig_setmethods__["maxDistVdw"] = _adfrcc.Parameters_maxDistVdw_set
__swig_getmethods__["maxDistVdw"] = _adfrcc.Parameters_maxDistVdw_get
if _newclass:
maxDistVdw = _swig_property(_adfrcc.Parameters_maxDistVdw_get, _adfrcc.Parameters_maxDistVdw_set)
__swig_setmethods__["maxDistHbond"] = _adfrcc.Parameters_maxDistHbond_set
__swig_getmethods__["maxDistHbond"] = _adfrcc.Parameters_maxDistHbond_get
if _newclass:
maxDistHbond = _swig_property(_adfrcc.Parameters_maxDistHbond_get, _adfrcc.Parameters_maxDistHbond_set)
__swig_setmethods__["maxDistEstat"] = _adfrcc.Parameters_maxDistEstat_set
__swig_getmethods__["maxDistEstat"] = _adfrcc.Parameters_maxDistEstat_get
if _newclass:
maxDistEstat = _swig_property(_adfrcc.Parameters_maxDistEstat_get, _adfrcc.Parameters_maxDistEstat_set)
__swig_setmethods__["maxDistSolv"] = _adfrcc.Parameters_maxDistSolv_set
__swig_getmethods__["maxDistSolv"] = _adfrcc.Parameters_maxDistSolv_get
if _newclass:
maxDistSolv = _swig_property(_adfrcc.Parameters_maxDistSolv_get, _adfrcc.Parameters_maxDistSolv_set)
__swig_setmethods__["useTables"] = _adfrcc.Parameters_useTables_set
__swig_getmethods__["useTables"] = _adfrcc.Parameters_useTables_get
if _newclass:
useTables = _swig_property(_adfrcc.Parameters_useTables_get, _adfrcc.Parameters_useTables_set)
__swig_setmethods__["tablesInterpolate"] = _adfrcc.Parameters_tablesInterpolate_set
__swig_getmethods__["tablesInterpolate"] = _adfrcc.Parameters_tablesInterpolate_get
if _newclass:
tablesInterpolate = _swig_property(_adfrcc.Parameters_tablesInterpolate_get, _adfrcc.Parameters_tablesInterpolate_set)
__swig_setmethods__["tablesIncrement"] = _adfrcc.Parameters_tablesIncrement_set
__swig_getmethods__["tablesIncrement"] = _adfrcc.Parameters_tablesIncrement_get
if _newclass:
tablesIncrement = _swig_property(_adfrcc.Parameters_tablesIncrement_get, _adfrcc.Parameters_tablesIncrement_set)
__swig_setmethods__["tablesMaxVdw"] = _adfrcc.Parameters_tablesMaxVdw_set
__swig_getmethods__["tablesMaxVdw"] = _adfrcc.Parameters_tablesMaxVdw_get
if _newclass:
tablesMaxVdw = _swig_property(_adfrcc.Parameters_tablesMaxVdw_get, _adfrcc.Parameters_tablesMaxVdw_set)
__swig_setmethods__["tablesMaxEstat"] = _adfrcc.Parameters_tablesMaxEstat_set
__swig_getmethods__["tablesMaxEstat"] = _adfrcc.Parameters_tablesMaxEstat_get
if _newclass:
tablesMaxEstat = _swig_property(_adfrcc.Parameters_tablesMaxEstat_get, _adfrcc.Parameters_tablesMaxEstat_set)
__swig_setmethods__["tablesMaxSolv"] = _adfrcc.Parameters_tablesMaxSolv_set
__swig_getmethods__["tablesMaxSolv"] = _adfrcc.Parameters_tablesMaxSolv_get
if _newclass:
tablesMaxSolv = _swig_property(_adfrcc.Parameters_tablesMaxSolv_get, _adfrcc.Parameters_tablesMaxSolv_set)
__swig_setmethods__["distSmoothingWidth"] = _adfrcc.Parameters_distSmoothingWidth_set
__swig_getmethods__["distSmoothingWidth"] = _adfrcc.Parameters_distSmoothingWidth_get
if _newclass:
distSmoothingWidth = _swig_property(_adfrcc.Parameters_distSmoothingWidth_get, _adfrcc.Parameters_distSmoothingWidth_set)
__swig_setmethods__["collisionDist"] = _adfrcc.Parameters_collisionDist_set
__swig_getmethods__["collisionDist"] = _adfrcc.Parameters_collisionDist_get
if _newclass:
collisionDist = _swig_property(_adfrcc.Parameters_collisionDist_get, _adfrcc.Parameters_collisionDist_set)
__swig_setmethods__["abortIfNumCollisions"] = _adfrcc.Parameters_abortIfNumCollisions_set
__swig_getmethods__["abortIfNumCollisions"] = _adfrcc.Parameters_abortIfNumCollisions_get
if _newclass:
abortIfNumCollisions = _swig_property(_adfrcc.Parameters_abortIfNumCollisions_get, _adfrcc.Parameters_abortIfNumCollisions_set)
__swig_setmethods__["hbWorstAngle"] = _adfrcc.Parameters_hbWorstAngle_set
__swig_getmethods__["hbWorstAngle"] = _adfrcc.Parameters_hbWorstAngle_get
if _newclass:
hbWorstAngle = _swig_property(_adfrcc.Parameters_hbWorstAngle_get, _adfrcc.Parameters_hbWorstAngle_set)
__swig_setmethods__["hbWorstAngleCos"] = _adfrcc.Parameters_hbWorstAngleCos_set
__swig_getmethods__["hbWorstAngleCos"] = _adfrcc.Parameters_hbWorstAngleCos_get
if _newclass:
hbWorstAngleCos = _swig_property(_adfrcc.Parameters_hbWorstAngleCos_get, _adfrcc.Parameters_hbWorstAngleCos_set)
__swig_setmethods__["qasp"] = _adfrcc.Parameters_qasp_set
__swig_getmethods__["qasp"] = _adfrcc.Parameters_qasp_get
if _newclass:
qasp = _swig_property(_adfrcc.Parameters_qasp_get, _adfrcc.Parameters_qasp_set)
__swig_setmethods__["numAtomTypes"] = _adfrcc.Parameters_numAtomTypes_set
__swig_getmethods__["numAtomTypes"] = _adfrcc.Parameters_numAtomTypes_get
if _newclass:
numAtomTypes = | |
<gh_stars>100-1000
#!/usr/bin/env python
# Copyright (C) 2014 Open Data ("Open Data" refers to
# one or more of the following companies: Open Data Partners LLC,
# Open Data Research LLC, or Open Data Capital LLC.)
#
# This file is part of Hadrian.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from titus.fcn import Fcn
from titus.fcn import LibFcn
from titus.signature import Sig
from titus.signature import Sigs
from titus.datatype import *
from titus.errors import *
import titus.P as P
# import special functions requred to compute distributions
from titus.lib.spec import logBetaFunction
from titus.lib.spec import incompleteBetaFunction
from titus.lib.spec import inverseIncompleteBetaFunction
from titus.lib.spec import regularizedGammaQ
from titus.lib.spec import regularizedGammaP
from titus.lib.spec import nChooseK
# Registry of all library functions exported by this module, keyed by PFA name.
provides = {}


def provide(fcn):
    """Record a library-function instance in the module registry under its PFA name."""
    provides[fcn.name] = fcn


# Namespace prefix shared by every function defined in this module.
prefix = "prob.dist."
class GaussianLL(LibFcn):
    """prob.dist.gaussianLL: log-likelihood of the normal distribution."""
    name = prefix + "gaussianLL"
    sig = Sigs([Sig([{"x": P.Double()}, {"mu": P.Double()}, {"sigma": P.Double()}], P.Double()),
                Sig([{"x": P.Double()}, {"params": P.WildRecord("A", {"mean": P.Double(), "variance": P.Double()})}], P.Double())])
    errcodeBase = 13000
    def __call__(self, state, scope, pos, paramTypes, x, *others):
        # Parameters arrive either as (mu, sigma) or as a {"mean", "variance"} record.
        if len(others) == 2:
            mean, stddev = others
        else:
            record = others[0]
            mean = record["mean"]
            variance = record["variance"]
            if math.isnan(variance) or variance < 0.0:
                raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
            stddev = math.sqrt(variance)
        if math.isinf(mean) or math.isnan(mean) or math.isinf(stddev) or math.isnan(stddev) or stddev < 0.0:
            raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
        if math.isinf(x) or math.isnan(x):
            raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
        if stddev == 0.0:
            # Degenerate (point-mass) distribution: all likelihood sits at the mean.
            return float("inf") if x == mean else float("-inf")
        return GaussianDistribution(mean, stddev, self.errcodeBase + 0, self.name, pos).LL(x)
provide(GaussianLL())
class GaussianCDF(LibFcn):
    """prob.dist.gaussianCDF: cumulative distribution of the normal distribution."""
    name = prefix + "gaussianCDF"
    sig = Sigs([Sig([{"x": P.Double()}, {"mu": P.Double()}, {"sigma": P.Double()}], P.Double()),
                Sig([{"x": P.Double()}, {"params": P.WildRecord("A", {"mean": P.Double(), "variance": P.Double()})}], P.Double())])
    errcodeBase = 13010
    def __call__(self, state, scope, pos, paramTypes, x, *others):
        # Parameters arrive either as (mu, sigma) or as a {"mean", "variance"} record.
        if len(others) == 2:
            mean, stddev = others
        else:
            record = others[0]
            mean = record["mean"]
            variance = record["variance"]
            if math.isnan(variance) or variance < 0.0:
                raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
            stddev = math.sqrt(variance)
        if math.isinf(mean) or math.isnan(mean) or math.isinf(stddev) or math.isnan(stddev) or stddev < 0.0:
            raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
        if math.isinf(x) or math.isnan(x):
            raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
        if stddev == 0.0:
            # Point mass at the mean: CDF is a unit step.
            return 0.0 if x < mean else 1.0
        return GaussianDistribution(mean, stddev, self.errcodeBase + 0, self.name, pos).CDF(x)
provide(GaussianCDF())
# written using http://www.johndcook.com/normal_cdf_inverse.html
class GaussianQF(LibFcn):
    """prob.dist.gaussianQF: quantile function (inverse CDF) of the normal distribution."""
    name = prefix + "gaussianQF"
    sig = Sigs([Sig([{"p": P.Double()}, {"mu": P.Double()}, {"sigma": P.Double()}], P.Double()),
                Sig([{"p": P.Double()}, {"params": P.WildRecord("A", {"mean": P.Double(), "variance": P.Double()})}], P.Double())])
    errcodeBase = 13020
    def __call__(self, state, scope, pos, paramTypes, p, *others):
        # Parameters arrive either as (mu, sigma) or as a {"mean", "variance"} record.
        if len(others) == 2:
            mean, stddev = others
        else:
            record = others[0]
            mean = record["mean"]
            variance = record["variance"]
            if math.isnan(variance) or variance < 0.0:
                raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
            stddev = math.sqrt(variance)
        if math.isinf(mean) or math.isnan(mean) or math.isinf(stddev) or math.isnan(stddev) or stddev < 0.0:
            raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
        if not (0.0 <= p <= 1.0):
            # Also rejects NaN, which fails the chained comparison.
            raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
        if p == 1.0:
            return float("inf")
        if p == 0.0:
            return float("-inf")
        if stddev == 0.0:
            # Degenerate distribution: every interior quantile is the mean.
            return mean
        return GaussianDistribution(mean, stddev, self.errcodeBase + 0, self.name, pos).QF(p)
provide(GaussianQF())
################ Exponential
class ExponentialPDF(LibFcn):
    """prob.dist.exponentialPDF: density of the exponential distribution."""
    name = prefix + "exponentialPDF"
    sig = Sig([{"x": P.Double()}, {"lambda": P.Double()}], P.Double())
    errcodeBase = 13030
    def __call__(self, state, scope, pos, paramTypes, x, rate):
        if math.isinf(rate) or math.isnan(rate) or rate < 0.0:
            raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
        if math.isinf(x) or math.isnan(x):
            raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
        if rate == 0.0 or x < 0.0:
            # Degenerate rate or negative support: zero density.
            return 0.0
        if x == 0.0:
            # Density at the origin equals the rate.
            return rate
        return ExponentialDistribution(rate, self.errcodeBase + 0, self.name, pos).PDF(x)
provide(ExponentialPDF())
class ExponentialCDF(LibFcn):
    """prob.dist.exponentialCDF: cumulative distribution of the exponential distribution."""
    name = prefix + "exponentialCDF"
    sig = Sig([{"x": P.Double()}, {"lambda": P.Double()}], P.Double())
    errcodeBase = 13040
    def __call__(self, state, scope, pos, paramTypes, x, rate):
        if math.isinf(rate) or math.isnan(rate) or rate < 0.0:
            raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
        if math.isinf(x) or math.isnan(x):
            raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
        if rate == 0.0 or x <= 0.0:
            # Degenerate rate, or at/below the lower edge of the support.
            return 0.0
        return ExponentialDistribution(rate, self.errcodeBase + 0, self.name, pos).CDF(x)
provide(ExponentialCDF())
class ExponentialQF(LibFcn):
    """prob.dist.exponentialQF: quantile function of the exponential distribution."""
    name = prefix + "exponentialQF"
    sig = Sig([{"p": P.Double()}, {"lambda": P.Double()}], P.Double())
    errcodeBase = 13050
    def __call__(self, state, scope, pos, paramTypes, p, rate):
        if math.isinf(rate) or math.isnan(rate) or rate < 0.0:
            raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
        if not (0.0 <= p <= 1.0):
            # Also rejects NaN, which fails the chained comparison.
            raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
        if rate == 0.0:
            # Degenerate rate: quantile is 0 at p == 0, +inf for any p > 0.
            return 0.0 if p == 0.0 else float("inf")
        if p == 1.0:
            return float("inf")
        if p == 0.0:
            return 0.0
        return ExponentialDistribution(rate, self.errcodeBase + 0, self.name, pos).QF(p)
provide(ExponentialQF())
################ Chi2
class Chi2PDF(LibFcn):
    """prob.dist.chi2PDF: density of the chi-squared distribution."""
    name = prefix + "chi2PDF"
    sig = Sig([{"x": P.Double()}, {"dof": P.Int()}], P.Double())
    errcodeBase = 13060
    def __call__(self, state, scope, pos, paramTypes, x, df):
        if df < 0:
            raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
        if math.isinf(x) or math.isnan(x):
            raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
        if df == 0:
            # Zero degrees of freedom: point mass at zero.
            return float("inf") if x == 0 else 0.0
        if x <= 0.0:
            return 0.0
        return Chi2Distribution(df, self.errcodeBase + 0, self.name, pos).PDF(x)
provide(Chi2PDF())
class Chi2CDF(LibFcn):
    """prob.dist.chi2CDF: cumulative distribution of the chi-squared distribution."""
    name = prefix + "chi2CDF"
    sig = Sig([{"x": P.Double()}, {"dof": P.Int()}], P.Double())
    errcodeBase = 13070
    def __call__(self, state, scope, pos, paramTypes, x, df):
        if df < 0:
            raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
        if math.isinf(x) or math.isnan(x):
            raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
        if df == 0:
            # Zero degrees of freedom: unit step at zero.
            return 1.0 if x > 0 else 0.0
        return Chi2Distribution(df, self.errcodeBase + 0, self.name, pos).CDF(x)
provide(Chi2CDF())
class Chi2QF(LibFcn):
    """prob.dist.chi2QF: quantile function of the chi-squared distribution."""
    name = prefix + "chi2QF"
    sig = Sig([{"p": P.Double()}, {"dof": P.Int()}], P.Double())
    errcodeBase = 13080
    def __call__(self, state, scope, pos, paramTypes, p, df):
        if df < 0:
            raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
        if not (0.0 <= p <= 1.0):
            # Also rejects NaN, which fails the chained comparison.
            raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
        if p == 1.0:
            # Checked before df == 0, matching the original branch order.
            return float("inf")
        if df == 0:
            return 0.0
        if p == 0.0:
            return 0.0
        return Chi2Distribution(df, self.errcodeBase + 0, self.name, pos).QF(p)
provide(Chi2QF())
################ Poisson #######################################
class PoissonPDF(LibFcn):
    """prob.dist.poissonPDF: probability mass of the Poisson distribution."""
    name = prefix + "poissonPDF"
    sig = Sig([{"x": P.Int()}, {"lambda": P.Double()}], P.Double())
    errcodeBase = 13090
    def __call__(self, state, scope, pos, paramTypes, x, lamda):
        if math.isinf(lamda) or math.isnan(lamda) or lamda < 0.0:
            raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
        if lamda == 0:
            # Degenerate distribution: all mass at zero.
            return 1.0 if x == 0 else 0.0
        if x < 0:
            return 0.0
        return PoissonDistribution(lamda, self.errcodeBase + 0, self.name, pos).PDF(x)
provide(PoissonPDF())
class PoissonCDF(LibFcn):
    """prob.dist.poissonCDF: cumulative distribution of the Poisson distribution."""
    name = prefix + "poissonCDF"
    sig = Sig([{"x": P.Int()}, {"lambda": P.Double()}], P.Double())
    errcodeBase = 13100
    def __call__(self, state, scope, pos, paramTypes, x, lamda):
        if math.isinf(lamda) or math.isnan(lamda) or lamda < 0:
            raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
        if math.isinf(x) or math.isnan(x):
            raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
        if lamda == 0:
            # Degenerate distribution: unit step at zero.
            return 1.0 if x >= 0 else 0.0
        return PoissonDistribution(lamda, self.errcodeBase + 0, self.name, pos).CDF(x)
provide(PoissonCDF())
class PoissonQF(LibFcn):
    """prob.dist.poissonQF: quantile function of the Poisson distribution."""
    name = prefix + "poissonQF"
    sig = Sig([{"p": P.Double()}, {"lambda": P.Double()}], P.Double())
    errcodeBase = 13110
    def __call__(self, state, scope, pos, paramTypes, p, lamda):
        if math.isinf(lamda) or math.isnan(lamda) or lamda < 0:
            raise PFARuntimeException("invalid parameterization", self.errcodeBase + 0, self.name, pos)
        if not (0.0 <= p <= 1.0):
            # Also rejects NaN, which fails the chained comparison.
            raise PFARuntimeException("invalid input", self.errcodeBase + 1, self.name, pos)
        if lamda == 0:
            # Degenerate distribution: every quantile is zero.
            return 0.0
        if p == 1:
            return float("inf")
        if p == 0:
            return 0.0
        return PoissonDistribution(lamda, self.errcodeBase + 0, self.name, pos).QF(p)
provide(PoissonQF())
################ Gamma
class GammaPDF(LibFcn):
name = prefix + "gammaPDF"
sig = Sig([{"x": P.Double()}, {"shape": P.Double()}, {"scale": P.Double()}], P.Double())
errcodeBase = 13120
def __call__(self, state, scope, pos, paramTypes, x, shape, scale):
if math.isinf(shape) or math.isnan(shape) or math.isinf(scale) or math.isnan(scale) or shape < 0 or scale < 0:
raise PFARuntimeException("invalid | |
difference is only in the sign of the eigenvector.
.. note:: See :func:`torch.linalg.eigvalsh` for a related function that computes only eigenvalues.
However, that function is not differentiable.
Args:
input (Tensor): the Hermitian `n \times n` matrix or the batch of such matrices of size
`(*, n, n)` where `*` is one or more batch dimensions.
UPLO ('L', 'U', optional): controls whether to use the upper-triangular or the lower-triangular part
of :attr:`input` in the computations. Default is ``'L'``.
Keyword args:
out (tuple, optional): tuple of two tensors to write the output to. Default is ``None``.
Returns:
(Tensor, Tensor): A namedtuple (eigenvalues, eigenvectors) containing
- **eigenvalues** (*Tensor*): Shape `(*, m)`.
The eigenvalues in ascending order.
- **eigenvectors** (*Tensor*): Shape `(*, m, m)`.
The orthonormal eigenvectors of the :attr:`input`.
Examples::
>>> a = torch.randn(2, 2, dtype=torch.complex128)
>>> a = a + a.t().conj() # creates a Hermitian matrix
>>> a
tensor([[2.9228+0.0000j, 0.2029-0.0862j],
[0.2029+0.0862j, 0.3464+0.0000j]], dtype=torch.complex128)
>>> w, v = torch.linalg.eigh(a)
>>> w
tensor([0.3277, 2.9415], dtype=torch.float64)
>>> v
tensor([[-0.0846+-0.0000j, -0.9964+0.0000j],
[ 0.9170+0.3898j, -0.0779-0.0331j]], dtype=torch.complex128)
>>> torch.allclose(torch.matmul(v, torch.matmul(w.to(v.dtype).diag_embed(), v.t().conj())), a)
True
>>> a = torch.randn(3, 2, 2, dtype=torch.float64)
>>> a = a + a.transpose(-2, -1) # creates a symmetric matrix
>>> w, v = torch.linalg.eigh(a)
>>> torch.allclose(torch.matmul(v, torch.matmul(w.diag_embed(), v.transpose(-2, -1))), a)
True
""")
eigvalsh = _add_docstr(_linalg.linalg_eigvalsh, r"""
linalg.eigvalsh(input, UPLO='L', *, out=None) -> Tensor
Computes the eigenvalues of a complex Hermitian (or real symmetric) matrix :attr:`input`,
or of each such matrix in a batched :attr:`input`. The eigenvalues are returned in ascending order.
Since the matrix or matrices in :attr:`input` are assumed to be Hermitian, the imaginary part of their diagonals
is always treated as zero. When :attr:`UPLO` is "L", its default value, only the lower triangular part of
each matrix is used in the computation. When :attr:`UPLO` is "U" only the upper triangular part of each matrix is used.
Supports input of float, double, cfloat and cdouble dtypes.
.. note:: When given inputs on a CUDA device, this function synchronizes that device with the CPU.
.. note:: The eigenvalues are computed using LAPACK's `syevd` and `heevd` routines for CPU inputs,
and MAGMA's `syevd` and `heevd` routines for CUDA inputs.
.. note:: The eigenvalues of real symmetric or complex Hermitian matrices are always real.
.. note:: This function doesn't support backpropagation, please use :func:`torch.linalg.eigh` instead,
which also computes the eigenvectors.
.. note:: See :func:`torch.linalg.eigh` for a related function that computes both eigenvalues and eigenvectors.
Args:
input (Tensor): the Hermitian `n \times n` matrix or the batch
of such matrices of size `(*, n, n)` where `*` is one or more batch dimensions.
UPLO ('L', 'U', optional): controls whether to use the upper-triangular or the lower-triangular part
of :attr:`input` in the computations. Default is ``'L'``.
Keyword args:
out (Tensor, optional): tensor to write the output to. Default is ``None``.
Examples::
>>> a = torch.randn(2, 2, dtype=torch.complex128)
>>> a = a + a.t().conj() # creates a Hermitian matrix
>>> a
tensor([[2.9228+0.0000j, 0.2029-0.0862j],
[0.2029+0.0862j, 0.3464+0.0000j]], dtype=torch.complex128)
>>> w = torch.linalg.eigvalsh(a)
>>> w
tensor([0.3277, 2.9415], dtype=torch.float64)
>>> a = torch.randn(3, 2, 2, dtype=torch.float64)
>>> a = a + a.transpose(-2, -1) # creates a symmetric matrix
>>> a
tensor([[[ 2.8050, -0.3850],
[-0.3850, 3.2376]],
[[-1.0307, -2.7457],
[-2.7457, -1.7517]],
[[ 1.7166, 2.2207],
[ 2.2207, -2.0898]]], dtype=torch.float64)
>>> w = torch.linalg.eigvalsh(a)
>>> w
tensor([[ 2.5797, 3.4629],
[-4.1605, 1.3780],
[-3.1113, 2.7381]], dtype=torch.float64)
""")
# Attach the reference documentation to the C-implemented binding
# torch._C._linalg.linalg_matrix_rank.
matrix_rank = _add_docstr(_linalg.linalg_matrix_rank, r"""
matrix_rank(input, tol=None, hermitian=False, *, out=None) -> Tensor
Computes the numerical rank of a matrix :attr:`input`, or of each matrix in a batched :attr:`input`.
The matrix rank is computed as the number of singular values (or absolute eigenvalues when :attr:`hermitian` is ``True``)
that are greater than the specified :attr:`tol` threshold.
If :attr:`tol` is not specified, :attr:`tol` is set to ``S.max(dim=-1)*max(input.shape[-2:])*eps``,
where ``S`` is the singular values (or absolute eigenvalues when :attr:`hermitian` is ``True``), and
``eps`` is the epsilon value for the datatype of :attr:`input`. The epsilon value can be obtained using
the ``eps`` attribute of :class:`torch.finfo`.
Supports input of float, double, cfloat and cdouble dtypes.
.. note:: When given inputs on a CUDA device, this function synchronizes that device with the CPU.
.. note:: The matrix rank is computed using singular value decomposition (see :func:`torch.linalg.svd`) by default.
If :attr:`hermitian` is ``True``, then :attr:`input` is assumed to be Hermitian (symmetric if real-valued),
and the computation is done by obtaining the eigenvalues (see :func:`torch.linalg.eigvalsh`).
Args:
input (Tensor): the input matrix of size `(m, n)` or the batch of matrices of size `(*, m, n)`
where `*` is one or more batch dimensions.
tol (float, optional): the tolerance value. Default is ``None``
hermitian(bool, optional): indicates whether :attr:`input` is Hermitian. Default is ``False``.
Keyword args:
out (Tensor, optional): tensor to write the output to. Default is ``None``.
Examples::
>>> a = torch.eye(10)
>>> torch.linalg.matrix_rank(a)
tensor(10)
>>> b = torch.eye(10)
>>> b[0, 0] = 0
>>> torch.linalg.matrix_rank(b)
tensor(9)
>>> a = torch.randn(4, 3, 2)
>>> torch.linalg.matrix_rank(a)
tensor([2, 2, 2, 2])
>>> a = torch.randn(2, 4, 2, 3)
>>> torch.linalg.matrix_rank(a)
tensor([[2, 2, 2, 2],
[2, 2, 2, 2]])
>>> a = torch.randn(2, 4, 3, 3, dtype=torch.complex64)
>>> torch.linalg.matrix_rank(a)
tensor([[3, 3, 3, 3],
[3, 3, 3, 3]])
>>> torch.linalg.matrix_rank(a, hermitian=True)
tensor([[3, 3, 3, 3],
[3, 3, 3, 3]])
>>> torch.linalg.matrix_rank(a, tol=1.0)
tensor([[3, 2, 2, 2],
[1, 2, 1, 2]])
>>> torch.linalg.matrix_rank(a, tol=1.0, hermitian=True)
tensor([[2, 2, 2, 1],
[1, 2, 2, 2]])
""")
norm = _add_docstr(_linalg.linalg_norm, r"""
linalg.norm(input, ord=None, dim=None, keepdim=False, *, out=None, dtype=None) -> Tensor
Returns the matrix norm or vector norm of a given tensor.
This function can calculate one of eight different types of matrix norms, or one
of an infinite number of vector norms, depending on both the number of reduction
dimensions and the value of the `ord` parameter.
Args:
input (Tensor): The input tensor. If dim is None, x must be 1-D or 2-D, unless :attr:`ord`
is None. If both :attr:`dim` and :attr:`ord` are None, the 2-norm of the input flattened to 1-D
will be returned. Its data type must be either a floating point or complex type. For complex
inputs, the norm is calculated on of the absolute values of each element. If the input is
complex and neither :attr:`dtype` nor :attr:`out` is specified, the result's data type will
be the corresponding floating point type (e.g. float if :attr:`input` is complexfloat).
ord (int, float, inf, -inf, 'fro', 'nuc', optional): The order of norm.
inf refers to :attr:`float('inf')`, numpy's :attr:`inf` object, or any equivalent object.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm -- not supported --
'nuc' nuclear norm -- not supported --
inf max(sum(abs(x), dim=1)) max(abs(x))
-inf min(sum(abs(x), dim=1)) min(abs(x))
0 -- not supported -- sum(x != 0)
1 max(sum(abs(x), dim=0)) as below
-1 min(sum(abs(x), dim=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- not supported -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
Default: ``None``
dim (int, 2-tuple of ints, 2-list of ints, optional): If :attr:`dim` is an int,
vector norm will be calculated over the specified dimension. If :attr:`dim`
is a 2-tuple of ints, matrix norm will be calculated over the specified
dimensions. If :attr:`dim` is None, matrix norm will be calculated
when the input tensor has two dimensions, and vector norm will be
calculated when the input tensor has one dimension. Default: ``None``
keepdim (bool, optional): If set to True, the reduced dimensions are retained
in the result as dimensions with size one. Default: ``False``
Keyword args:
out (Tensor, optional): The output tensor. Ignored if ``None``. Default: ``None``
dtype (:class:`torch.dtype`, optional): If specified, the input tensor is cast to
:attr:`dtype` before performing the operation, and the returned tensor's type
will be :attr:`dtype`. If this argument is used in conjunction with the
:attr:`out` argument, the output tensor's type must match this argument or a
RuntimeError will be raised. Default: ``None``
Examples::
>>> import torch
>>> from torch import linalg as LA
>>> a = torch.arange(9, dtype=torch.float) - 4
>>> a
tensor([-4., -3., -2., -1., 0., 1., 2., 3., 4.])
>>> b = a.reshape((3, | |
conflict_mode == ConflictMode.Adjacent:
# Take the signal adjacent to the position we are in
if position_now == 0:
# Cannot decide -> ignore
is_entry = False
is_exit = False
else:
if direction == Direction.Both:
if position_now > 0:
is_exit = False
elif position_now < 0:
is_entry = False
else:
is_exit = False
elif conflict_mode == ConflictMode.Opposite:
# Take the signal opposite to the position we are in
if position_now == 0:
# Cannot decide -> ignore
is_entry = False
is_exit = False
else:
if direction == Direction.Both:
if position_now > 0:
is_entry = False
elif position_now < 0:
is_exit = False
else:
is_entry = False
else:
is_entry = False
is_exit = False
return is_entry, is_exit
@njit(cache=True)
def resolve_dir_conflict_nb(position_now: float,
                            is_long_entry: bool,
                            is_short_entry: bool,
                            upon_dir_conflict: int) -> tp.Tuple[bool, bool]:
    """Resolve any direction conflict between a long entry and a short entry."""
    if not (is_long_entry and is_short_entry):
        # No conflict: pass both signals through untouched
        return is_long_entry, is_short_entry
    if upon_dir_conflict == DirectionConflictMode.Long:
        return True, False
    if upon_dir_conflict == DirectionConflictMode.Short:
        return False, True
    if upon_dir_conflict == DirectionConflictMode.Adjacent:
        # Keep the entry pointing in the direction of the open position
        if position_now > 0:
            return True, False
        if position_now < 0:
            return False, True
        # Flat position: cannot decide -> drop both
        return False, False
    if upon_dir_conflict == DirectionConflictMode.Opposite:
        # Keep the entry pointing against the open position
        if position_now > 0:
            return False, True
        if position_now < 0:
            return True, False
        # Flat position: cannot decide -> drop both
        return False, False
    # Any other mode ignores the conflicting pair entirely
    return False, False
@njit(cache=True)
def resolve_opposite_entry_nb(position_now: float,
                              is_long_entry: bool,
                              is_long_exit: bool,
                              is_short_entry: bool,
                              is_short_exit: bool,
                              upon_opposite_entry: int,
                              accumulate: int) -> tp.Tuple[bool, bool, bool, bool, int]:
    """Resolve opposite entry."""
    # An "opposite entry" is a short entry while long, or a long entry while short.
    # The two cases are mutually exclusive (position sign), so we can branch on the
    # resolution mode first and handle both sides inside each mode.
    if upon_opposite_entry == OppositeEntryMode.Ignore:
        if position_now > 0 and is_short_entry:
            is_short_entry = False
        elif position_now < 0 and is_long_entry:
            is_long_entry = False
    elif upon_opposite_entry == OppositeEntryMode.Close:
        # Swap the opposite entry for an exit and disable accumulation
        if position_now > 0 and is_short_entry:
            is_short_entry = False
            is_long_exit = True
            accumulate = AccumulationMode.Disabled
        elif position_now < 0 and is_long_entry:
            is_long_entry = False
            is_short_exit = True
            accumulate = AccumulationMode.Disabled
    elif upon_opposite_entry == OppositeEntryMode.CloseReduce:
        # Swap the opposite entry for an exit but keep the accumulation mode
        if position_now > 0 and is_short_entry:
            is_short_entry = False
            is_long_exit = True
        elif position_now < 0 and is_long_entry:
            is_long_entry = False
            is_short_exit = True
    elif upon_opposite_entry == OppositeEntryMode.Reverse:
        # Keep the entry (position reversal) but disable accumulation
        if (position_now > 0 and is_short_entry) or (position_now < 0 and is_long_entry):
            accumulate = AccumulationMode.Disabled
    return is_long_entry, is_long_exit, is_short_entry, is_short_exit, accumulate
@njit(cache=True)
def signals_to_size_nb(position_now: float,
                       is_long_entry: bool,
                       is_long_exit: bool,
                       is_short_entry: bool,
                       is_short_exit: bool,
                       size: float,
                       size_type: int,
                       accumulate: int,
                       val_price_now: float) -> tp.Tuple[float, int, int]:
    """Translate direction-aware signals into size, size type, and direction.

    Returns `(order_size, size_type, direction)`, where a positive
    `order_size` buys and a negative one sells. `val_price_now` is used to
    convert a `SizeType.Value` size into an amount when reversing a position.
    """
    # Direction is expressed through the signals, so only absolute sizing modes apply
    if size_type != SizeType.Amount and size_type != SizeType.Value and size_type != SizeType.Percent:
        raise ValueError("Only SizeType.Amount, SizeType.Value, and SizeType.Percent are supported")
    order_size = 0.
    direction = Direction.Both
    abs_position_now = abs(position_now)
    if is_less_nb(size, 0):
        raise ValueError("Negative size is not allowed. You must express direction using signals.")
    if position_now > 0:
        # We're in a long position
        if is_short_entry:
            if accumulate == AccumulationMode.Both or accumulate == AccumulationMode.RemoveOnly:
                # Decrease the position
                order_size = -size
            else:
                # Reverse the position
                order_size = -abs_position_now
                if not np.isnan(size):
                    if size_type == SizeType.Percent:
                        raise ValueError(
                            "SizeType.Percent does not support position reversal using signals")
                    if size_type == SizeType.Value:
                        order_size -= size / val_price_now
                    else:
                        order_size -= size
                # The reversal size is expressed in asset units
                size_type = SizeType.Amount
        elif is_long_exit:
            direction = Direction.LongOnly
            if accumulate == AccumulationMode.Both or accumulate == AccumulationMode.RemoveOnly:
                # Decrease the position
                order_size = -size
            else:
                # Close the position
                order_size = -abs_position_now
                size_type = SizeType.Amount
        elif is_long_entry:
            direction = Direction.LongOnly
            if accumulate == AccumulationMode.Both or accumulate == AccumulationMode.AddOnly:
                # Increase the position
                order_size = size
    elif position_now < 0:
        # We're in a short position
        if is_long_entry:
            if accumulate == AccumulationMode.Both or accumulate == AccumulationMode.RemoveOnly:
                # Decrease the position
                order_size = size
            else:
                # Reverse the position
                order_size = abs_position_now
                if not np.isnan(size):
                    if size_type == SizeType.Percent:
                        raise ValueError("SizeType.Percent does not support position reversal using signals")
                    if size_type == SizeType.Value:
                        order_size += size / val_price_now
                    else:
                        order_size += size
                # The reversal size is expressed in asset units
                size_type = SizeType.Amount
        elif is_short_exit:
            direction = Direction.ShortOnly
            if accumulate == AccumulationMode.Both or accumulate == AccumulationMode.RemoveOnly:
                # Decrease the position
                order_size = size
            else:
                # Close the position
                order_size = abs_position_now
                size_type = SizeType.Amount
        elif is_short_entry:
            direction = Direction.ShortOnly
            if accumulate == AccumulationMode.Both or accumulate == AccumulationMode.AddOnly:
                # Increase the position
                order_size = -size
    else:
        # Flat: entries open a fresh position, exits have nothing to act on
        if is_long_entry:
            # Open long position
            order_size = size
        elif is_short_entry:
            # Open short position
            order_size = -size
    return order_size, size_type, direction
@njit(cache=True)
def should_update_stop_nb(stop: float, upon_stop_update: int) -> bool:
    """Whether to update stop."""
    if upon_stop_update == StopUpdateMode.OverrideNaN:
        # Override unconditionally, even with a NaN stop
        return True
    if upon_stop_update == StopUpdateMode.Override:
        # Override only with a defined (non-NaN) stop value
        return not np.isnan(stop)
    return False
@njit(cache=True)
def get_stop_price_nb(position_now: float,
                      stop_price: float,
                      stop: float,
                      open: float,
                      low: float,
                      high: float,
                      hit_below: bool) -> float:
    """Get stop price.
    If hit before open, returns open."""
    if stop < 0:
        raise ValueError("Stop value must be 0 or greater")
    # Decide on which side of the reference price the stop sits
    stop_is_below = position_now > 0 if hit_below else position_now < 0
    stop_is_above = position_now < 0 if hit_below else position_now > 0
    if stop_is_below:
        target = stop_price * (1 - stop)
        if open <= target:
            # Gapped through the stop at the open
            return open
    elif stop_is_above:
        target = stop_price * (1 + stop)
        if target <= open:
            # Gapped through the stop at the open
            return open
    else:
        # No open position -> no stop price
        return np.nan
    # Hit only if the target lies within the bar's range
    if low <= target <= high:
        return target
    return np.nan
@njit
def no_signal_func_nb(c: SignalContext, *args) -> tp.Tuple[bool, bool, bool, bool]:
    """Placeholder signal function that returns no signal."""
    # Flag order: (is_long_entry, is_long_exit, is_short_entry, is_short_exit)
    return False, False, False, False
@njit
def no_adjust_sl_func_nb(c: AdjustSLContext, *args) -> tp.Tuple[float, bool]:
    """Placeholder function that returns the initial stop-loss value and trailing flag."""
    # Echo back the current stop and trailing flag unchanged
    return c.curr_stop, c.curr_trail
@njit
def no_adjust_tp_func_nb(c: AdjustTPContext, *args) -> float:
    """Placeholder function that returns the initial take-profit value."""
    # Echo back the current take-profit stop unchanged
    return c.curr_stop
# Callable signatures accepted by `simulate_from_signal_func_nb` for user-supplied
# signal and stop-adjustment functions (context object plus variadic user args).
SignalFuncT = tp.Callable[[SignalContext, tp.VarArg()], tp.Tuple[bool, bool, bool, bool]]
AdjustSLFuncT = tp.Callable[[AdjustSLContext, tp.VarArg()], tp.Tuple[float, bool]]
AdjustTPFuncT = tp.Callable[[AdjustTPContext, tp.VarArg()], float]
@njit
def simulate_from_signal_func_nb(target_shape: tp.Shape,
group_lens: tp.Array1d,
init_cash: tp.Array1d,
call_seq: tp.Array2d,
signal_func_nb: SignalFuncT = no_signal_func_nb,
signal_args: tp.ArgsLike = (),
size: tp.ArrayLike = np.asarray(np.inf),
price: tp.ArrayLike = np.asarray(np.inf),
size_type: tp.ArrayLike = np.asarray(SizeType.Amount),
fees: tp.ArrayLike = np.asarray(0.),
fixed_fees: tp.ArrayLike = np.asarray(0.),
slippage: tp.ArrayLike = np.asarray(0.),
min_size: tp.ArrayLike = np.asarray(0.),
max_size: tp.ArrayLike = np.asarray(np.inf),
size_granularity: tp.ArrayLike = np.asarray(np.nan),
reject_prob: tp.ArrayLike = np.asarray(0.),
lock_cash: tp.ArrayLike = np.asarray(False),
allow_partial: tp.ArrayLike = np.asarray(True),
raise_reject: tp.ArrayLike = np.asarray(False),
log: tp.ArrayLike = np.asarray(False),
accumulate: tp.ArrayLike = np.asarray(AccumulationMode.Disabled),
upon_long_conflict: tp.ArrayLike = np.asarray(ConflictMode.Ignore),
upon_short_conflict: tp.ArrayLike = np.asarray(ConflictMode.Ignore),
upon_dir_conflict: tp.ArrayLike = np.asarray(DirectionConflictMode.Ignore),
upon_opposite_entry: tp.ArrayLike = np.asarray(OppositeEntryMode.ReverseReduce),
val_price: tp.ArrayLike = np.asarray(np.inf),
open: tp.ArrayLike = np.asarray(np.nan),
high: tp.ArrayLike = np.asarray(np.nan),
low: tp.ArrayLike = np.asarray(np.nan),
close: tp.ArrayLike = np.asarray(np.nan),
sl_stop: tp.ArrayLike = np.asarray(np.nan),
sl_trail: tp.ArrayLike = np.asarray(False),
tp_stop: tp.ArrayLike = np.asarray(np.nan),
stop_entry_price: tp.ArrayLike = np.asarray(StopEntryPrice.Close),
stop_exit_price: tp.ArrayLike = np.asarray(StopExitPrice.StopLimit),
upon_stop_exit: tp.ArrayLike = np.asarray(StopExitMode.Close),
upon_stop_update: tp.ArrayLike = np.asarray(StopUpdateMode.Override),
adjust_sl_func_nb: AdjustSLFuncT = no_adjust_sl_func_nb,
adjust_sl_args: tp.Args = (),
adjust_tp_func_nb: AdjustTPFuncT = no_adjust_tp_func_nb,
adjust_tp_args: tp.Args = (),
use_stops: bool = True,
auto_call_seq: bool = False,
ffill_val_price: bool = True,
update_value: bool = False,
max_orders: tp.Optional[int] = None,
max_logs: int = 0,
flex_2d: bool = True) -> tp.Tuple[tp.RecordArray, tp.RecordArray]:
"""Creates an order out of each element by resolving entry and exit signals returned by `signal_func_nb`.
Iterates in the column-major order. Utilizes flexible broadcasting.
Signals are processed using the following pipeline:
1) If there is a stop signal, convert it to direction-aware signals and proceed to 7)
2) Get direction-aware signals using `signal_func_nb`
3) Resolve any entry and exit conflict of each direction using `resolve_signal_conflict_nb`
4) Resolve any direction conflict using `resolve_dir_conflict_nb`
5) Resolve an opposite entry signal scenario using `resolve_opposite_entry_nb`
7) Convert the final signals into size, size type, and direction using `signals_to_size_nb`
!!! note
Should be only grouped if cash sharing is enabled.
If `auto_call_seq` is True, make sure that `call_seq` follows `CallSeqType.Default`.
Single value should be passed as a 0-dim array (for example, by using `np.asarray(value)`).
Usage:
* Buy and hold using all cash and closing price (default):
```pycon
>>> import numpy as np
>>> from vectorbt.records.nb import col_map_nb
>>> from vectorbt.portfolio import nb
>>> from vectorbt.portfolio.enums import Direction
>>> close = np.array([1, 2, 3, 4, 5])[:, None]
>>> order_records, _ = nb.simulate_from_signal_func_nb(
... target_shape=close.shape,
... close=close,
... group_lens=np.array([1]),
... init_cash=np.array([100]),
... call_seq=np.full(close.shape, 0),
... signal_func_nb=nb.dir_enex_signal_func_nb,
... signal_args=(np.asarray(True), np.asarray(False), np.asarray(Direction.LongOnly))
... )
>>> col_map = col_map_nb(order_records['col'], close.shape[1])
>>> asset_flow = nb.asset_flow_nb(close.shape, order_records, col_map, Direction.Both)
>>> asset_flow
array([[100.],
[ 0.],
[ 0.],
[ 0.],
| |
#!/usr/bin/env python3
"""@@@
Main Program: stitch_heatmaps.py
(originally called anal_CTCF.py (ANALyze CTCF))
Classes: Anchor
Domain
BuildHeatMaps
Author: <NAME>
creation date: mostly 2016 and up to March 2017.
last update: 200311 some cosmetic upgrades
version: 0
Purpose:
Build eheat files out of CCDs stored in the *.bed files and the
respective heatmaps.
The idea behind this code is to take the set of data from a bed file
containing CTCF or RNA polymerase II data (or both) and assemble it
within the boundaries of a heat map file. For example, take a file
from "loops.CTCF.annotated.bed"
CTCF CTCF PET cmplx cmplx cmplx
chr bgn end 1 2 cnt ity a b open active
chr1 838908 912011 R L 11 0 4 6 0.0686291944243 0.426918183932
chr1 838908 922335 R R 5 1 6 8 0.0601364066789 0.397329401752
chr1 838908 1000167 R L 7 3 15 17 0.0910398799447 0.5333717808
chr1 918286 969271 R R 5 0 4 4 0.119015396685 0.543963910954
chr1 918286 1000167 R L 75 1 8 8 0.11802493863 0.65697780926
chr1 918286 1059032 R R 4 2 12 12 0.09421226891 0.648785755901
chr1 967152 1000167 R L 52 0 3 3 0.152052097531 0.842677570801
chr1 967152 1059032 R R 6 1 7 7 0.0937744884632 0.711155855464
chr1 967152 1308124 R L 7 2 34 34 0.0720704339359 0.62287812489
chr1 1306262 1377332 L L 7 0 12 12 0.0938229914169 0.663528915154
chr1 1890973 1978919 R L 41 0 -2 0 0.0051054055898 0.0633229481727
chr1 1890973 2106697 R L 13 2 -1 3 0.0118067530734 0.228560568133
chr1 1890973 3341691 R L 4 9 44 49 0.0292889451982 0.212245246836
chr1 2021345 2106697 R L 11 0 -1 1 0.0131689942825 0.359991564345
chr1 2021345 2316695 R L 9 2 17 18 0.0530184526833 0.47499238192
chr1 2105324 2316695 L L 7 1 16 16 0.0687653462396 0.52343509753
chr1 2125307 2316695 R L 259 0 13 12 0.0720316843271 0.512090622192
chr1 2343625 2480764 R L 80 0 6 6 0.0746177236235 0.392732920613
There are two different starting points 838908 (with 3 entries) and
918286 (also with 3 entries). Hence, we can group the CTCFs to
( 838908, 1000167) c
( 838908, 922335) tR
( 838908, 912011) c
so, graphically, this first set suggests
-----x------------------|---|--------------------------------x------
|838908 912011| |922335 |1000167
|_________c________| | |
|_________tR___________| |
|_________c_____________________________________________|
and
( 918286, 969271) tr
( 918286, 1000167) c
( 918286, 1059032) tr
and
( 967152, 1000167) c
( 967152, 1059032) tr
( 967152, 1308124) c
and
(1306262, 1377332) tL
so these two data sets pictured graphically are arranged as follows:
_______tR______________________________________ .. _________
|_______c__________________________ |
|_______tR________ | |
| | | |
|918286 |969271 | |1059032
| _____c___________|____________ .. __________________ .. _________
| |_____tR__________|____________ .. _________ |
| |_____c___________| | |
| || | | |
| 967152||969271 | .. |1059032 .. |1308124
| || | | |
|838908 | || |1000167 .. |1059032 |1308124
-----x------------------|-|-|--------------||----------------x------------ .. ---------x-------- .. ----------x----------- .------------
|838908 912011| |922335 |1000167 1306262| |1377332
|_________c________| | | |____________ .. _____|
|_________tR___________| |
|_________c_____________________________________________|
separation between the short fragments:
int( 0.5 + ( 918286 - 912011) / 5000 ) = 1
int( 0.5 + ( 922335 - 918286) / 5000 ) = 1
int( 0.5 + ( 922335 - 912011) / 5000 ) = 2
int( 0.5 + ( 969271 - 967152) / 5000 ) = 0
int( 0.5 + (1308124 -1306262) / 5000 ) = 0
absolute points on the grid from 838908
int( 0.5 + ( 912011 - 838908) / 5000 ) = 15*
int( 0.5 + ( 918286 - 838908) / 5000 ) = 16*
int( 0.5 + ( 922335 - 838908) / 5000 ) = 17*
---
int( 0.5 + ( 967152 - 838908) / 5000 ) = 26*
int( 0.5 + ( 969271 - 838908) / 5000 ) = 26*
---
int( 0.5 + (1000167 - 838908) / 5000 ) = 32
---
int( 0.5 + (1059032 - 838908) / 5000 ) = 44
---
int( 0.5 + (1306262 - 838908) / 5000 ) = 93*
int( 0.5 + (1308124 - 838908) / 5000 ) = 94*
---
int( 0.5 + (1377332 - 838908) / 5000 ) = 108
where the "*" means that there are several CTCFs that are in very
close proximity with respect to the heatmap.
The input file is a list of loops formatted according to
Przemek's approach (with extension 'bed').
command line example:
    > assemble_heatmaps_and_CCDs.py -ff loops.CTCF.annotated.bed -dG
where the file "loops.CTCF.annotated.bed" contains a list of data
formatted according to Przemek. This has the format
file format1 (loops.CTCF.annotated.withAandB.bed):
version: 1
# CTCF analysis from ChIA-PET data and epigenetic info
# active
# open
# compartment A
# compartment B
chromatin_tags:
chr bgn end lCTCF rCTCF PETcnt cmplx1 cmplx2 cmplx3 active open A B
chromatin_data:
chr1 838908 912011 R L 11 0 4 6 0.0686291944243 0.426918183932 1.0 0.0
chr1 838908 922335 R R 5 1 6 8 0.0601364066789 0.397329401752 1.0 0.0
chr1 838908 1000167 R L 7 3 15 17 0.0910398799447 0.5333717808 1.0 0.0
chr1 918286 969271 R R 5 0 4 4 0.119015396685 0.543963910954 1.0 0.0
file format1 (loops.CTCF.annotated.bed):
CTCF CTCF PET cmplx cmplx cmplx
chr bgn end 1 2 cnt ity a b open active
chr1 838908 912011 R L 11 0 4 6 0.0686291944243 0.426918183932
chr1 838908 922335 R R 5 1 6 8 0.0601364066789 0.397329401752
chr1 838908 1000167 R L 7 3 15 17 0.0910398799447 0.5333717808
chr1 918286 969271 R R 5 0 4 4 0.119015396685 0.543963910954
chr1 918286 1000167 R L 75 1 8 8 0.11802493863 0.65697780926
chr1 918286 1059032 R R 4 2 12 12 0.09421226891 0.648785755901
file format2 (structure.loops.GSE6352.annotated.bed):
chr1 1050000 1190000 241 -1 9 9 0.0529785714286 0.418914285714
chr1 1585000 1650000 80 -2 0 0 0.0553384615385 0.737907692308
chr1 1710000 1840000 154 -2 22 22 0.0699230769231 0.874938461538
file format2 (structure.TAD_Rao.annotated.bed):
chr1 915000 1005000 0.72251 1 10 10 0.116533333333 0.643166666667
chr1 1030000 1235000 1.1954 -1 12 12 0.0460487804878 0.507936585366
chr1 1255000 1450000 0.9312 0 27 27 0.0741435897436 0.653230769231
In general, if more than one file is used (with extension "bed"),
all files should be in the same format.
These files often have different data related to open, active,
inactive, compartment A or B, etc., so the results typically
require more than one file. Therefore, multiple file entries are
allowed for both file types; however, in general, for "bed"
files, there should only be one input file.
"""
import sys
import os
import chreval
from FileTools import FileTools
from FileTools import getHeadExt
from CUtils import String
from ChromatinData import Data
from ChromatinData import make_file_heading
from BasicTools import initialize_matrix
from HeatMapTools import HeatMapTools
from assemble_heatmaps_and_CCDs import Domain
from assemble_heatmaps_and_CCDs import Anchor
from assemble_heatmaps_and_CCDs import BuildHeatMaps
from assemble_heatmaps_and_CCDs import build_tmpDir
from assemble_heatmaps_and_CCDs import assemble_eheatMaps
# Program name used in the usage banner.
PROGRAM = "stitch_heatmaps.py"
# Accepted input-file extensions.
EXTS = ["bed"]
# String helper from CUtils; the constructor argument's meaning is defined
# in CUtils.String -- NOTE(review): confirm what the 10 configures.
string = String(10)
"""@
loop_file.bed -- file containing references to heat maps for specific
regions of the chromatin. These heatmaps are stitched
together to generate the desired extended heatmap.
template_file.bed -- This is a reference file that defines the desired
regions of interest and helps explain how a given
region should be stitched together.
    out_dir -- name of the output directory
"""
# tag for printing
USAGE = ["\n\nUSAGE: %s -ff [loop_file].bed [[loop_file2].bed ... ] -rr [template_file].bed [-out_dir]" % PROGRAM,
"",
"loop_file.bed -- file containing references to heat maps for specific",
" regions of the chromatin. These heatmaps are stitched",
" together to generate the desired extended heatmap.",
"",
"",
"template_file.bed -- This is a reference file that defines the desired",
" regions of interest and helps explain how a given",
" region should be stitched together.",
"",
"out_dir -- name of the output director" ]
#
def usage():
    """Print the command-line usage message, one USAGE entry per line."""
    for line in USAGE:
        print (line)
    #|endfor
#
class StitchHeatMaps(BuildHeatMaps):
def __init__(self, ctcfdata, refdt):
BuildHeatMaps.__init__(self, ctcfdata) # inherit InputSettings
self.refData = refdt
self.ref_cdata = refdt.cdata
self.refkeylist = refdt.keylist
self.refDatatype = refdt.datatype
self.ref_missing = []
self.ctcf_missing = []
#
def stitchCTCFregions(self, inbedflnms, inrefflnm, debug = False):
"""@
The main job here is group CTCFs over the range that they form
a single island (the collection of arches in a typical display
of CTCF data in Tang et al.).
This procedure goes through the list of CTCF contact points
listed in the input bed file (which was read and processes
using an object of class Data in the module ChromatinData) and
groups all the common CTCFs that fit within a particular
island of CTCFs. The resulting groups can then be used to
stitch together heat maps.
The initial step in the process is to verify that the given
heat map indicated in the bed file actually exists. It is not
clear why at this point, but some of the bed entries do not
have any corresponding heat map files and are rejected as a
result. The ctcf_missing information is stored in a handle
"ctcf_missing". The ctcf_missing files cannot be used to
stitch together a heatmap, but they might be overlapped by
other CTCFs in the region that overlap the ctcf_missing
segment.
The reference or template file is also read and if no heat
maps from the ctcf data can be found that match or fit in the
region, then these missing regions are stored in ref_missing.
While the list of heatmap files is being established, we also
group the CTCF connections together in an object of class
Domain (here self.domain).
The size of the particular domain grouping of CTCFs is decided
by the data itself within the bed file. The borders of that
domain are therefore only fixed after the entire bed datafile
is scanned. It would, therefore, be possible that only one
large kluge sized domain is generated. In the current data
        that I have from bed files, it is not so dense and thoroughly
entwined, but that is something this approach risks.
"""
debug_stitchCTCFregions = debug
self.ctcf_missing = []
if debug_stitchCTCFregions:
print ("stitchCTCFregions()")
print (".... combining CCD info from following bed files:")
for v in inbedflnms:
print (" <- %s" % v)
#|endfor
print ("reference AB regions file:")
print (" <- %s" % inrefflnm)
#sys.exit(0)
#
ctcfkeylist = self.ctcfkeylist
if len(self.ctcfkeylist) == 0:
# not sure that it really works, but it would seem that if
# there is some data loaded, then it can actually process
# it at this step too. I think the program should just
# stop if it doesn't have a set ctcfkeylist, but we will see.
print ("stitchCTCFregions: setting ctcfkeylist")
print ("length of the dataset: ", len(self.ctcf_cdata))
ctcfkeylist = self.ctcfData.ordered_keylist()
#
print ("stitchCTCFregions -- length of ctcf keylist: ", len(self.ctcfkeylist))
if debug_stitchCTCFregions:
| |
= request.POST.get('r24c11').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r24c12 = request.POST.get('r24c12').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r24c13 = request.POST.get('r24c13').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r24c14 = request.POST.get('r24c14').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r24c15 = request.POST.get('r24c15').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r24c16 = request.POST.get('r24c16').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
r24c17 = request.POST.get('r24c17').replace('\t', ' ').replace('\n', ' ').replace('\r', ' ')
body = '<!doctype html>' + \
'<html lang="en">' + \
'<head>' + \
'<meta charset="utf-8">' + \
'<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">' + \
'<link rel="stylesheet"' + \
'href="https://cdn.jsdelivr.net/npm/bootstrap@4.5.3/dist/css/bootstrap.min.css"' + \
'integrity="sha384-TX8t27EcRE3e/ihU7zmQxVncDAy5uIKz4rEkgIXeMed4M0jlfID<KEY>"' + \
'crossorigin="anonymous">' + \
'<title>Annual income budget</title>' + \
'</head>' + \
'<body>' + \
'<div class="container">' + \
'<div class="card text-center">' + \
'<div class="card-header text-center">Annual income budget</div>' + \
'<div class="card-body">'
body += '<h6>Comapny name : ' + company_name + '</h6>' + \
'<h6>Share capital : ' + share_capital + '</h6>' + \
'<h6>Head office address : ' + head_office_address + '</h6>' + \
'<h6>Establishment number : ' + establishment_number + '</h6>' + \
'<h6>Register of Trade and Companies : ' + register_of_trade_and_companies + '</h6>' + \
'<h6>Main activities : ' + main_activities + '</h6>' + \
'<h6>Activity number : ' + activity_number + '</h6>' + \
'<h6>Intra-community VAT number : ' + intra_community_vat_number + '</h6>' + \
'<h6>President : ' + president + '</h6>' + \
'<h6>Registration date : ' + registration_date + '</h6>' + \
'<br>'
body += '<br>'
body += '<table class="table table-striped table-bordered">' + \
'<thead>' + \
'<tr>' + \
'<th scope="col">Details</th>' + \
'<th scope="col">M1</th>' + \
'<th scope="col">M2</th>' + \
'<th scope="col">M3</th>' + \
'<th scope="col">Q1</th>' + \
'<th scope="col">M4</th>' + \
'<th scope="col">M5</th>' + \
'<th scope="col">M6</th>' + \
'<th scope="col">Q2</th>' + \
'<th scope="col">M7</th>' + \
'<th scope="col">M8</th>' + \
'<th scope="col">M9</th>' + \
'<th scope="col">Q3</th>' + \
'<th scope="col">M10</th>' + \
'<th scope="col">M11</th>' + \
'<th scope="col">M12</th>' + \
'<th scope="col">Q4</th>' + \
'<th scope="col">Total</th>' + \
'</tr>' + \
'</thead>' + \
'<tbody>' + \
'<tr>' + \
'<td>Sales budget income</td>' + \
'<td>' + r1c1 + '</td>' + \
'<td>' + r1c2 + '</td>' + \
'<td>' + r1c3 + '</td>' + \
'<td>' + r1c4 + '</td>' + \
'<td>' + r1c5 + '</td>' + \
'<td>' + r1c6 + '</td>' + \
'<td>' + r1c7 + '</td>' + \
'<td>' + r1c8 + '</td>' + \
'<td>' + r1c9 + '</td>' + \
'<td>' + r1c10 + '</td>' + \
'<td>' + r1c11 + '</td>' + \
'<td>' + r1c12 + '</td>' + \
'<td>' + r1c13 + '</td>' + \
'<td>' + r1c14 + '</td>' + \
'<td>' + r1c15 + '</td>' + \
'<td>' + r1c16 + '</td>' + \
'<td>' + r1c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Sales actual income</td>' + \
'<td>' + r2c1 + '</td>' + \
'<td>' + r2c2 + '</td>' + \
'<td>' + r2c3 + '</td>' + \
'<td>' + r2c4 + '</td>' + \
'<td>' + r2c5 + '</td>' + \
'<td>' + r2c6 + '</td>' + \
'<td>' + r2c7 + '</td>' + \
'<td>' + r2c8 + '</td>' + \
'<td>' + r2c9 + '</td>' + \
'<td>' + r2c10 + '</td>' + \
'<td>' + r2c11 + '</td>' + \
'<td>' + r2c12 + '</td>' + \
'<td>' + r2c13 + '</td>' + \
'<td>' + r2c14 + '</td>' + \
'<td>' + r2c15 + '</td>' + \
'<td>' + r2c16 + '</td>' + \
'<td>' + r2c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Sales variance income</td>' + \
'<td>' + r3c1 + '</td>' + \
'<td>' + r3c2 + '</td>' + \
'<td>' + r3c3 + '</td>' + \
'<td>' + r3c4 + '</td>' + \
'<td>' + r3c5 + '</td>' + \
'<td>' + r3c6 + '</td>' + \
'<td>' + r3c7 + '</td>' + \
'<td>' + r3c8 + '</td>' + \
'<td>' + r3c9 + '</td>' + \
'<td>' + r3c10 + '</td>' + \
'<td>' + r3c11 + '</td>' + \
'<td>' + r3c12 + '</td>' + \
'<td>' + r3c13 + '</td>' + \
'<td>' + r3c14 + '</td>' + \
'<td>' + r3c15 + '</td>' + \
'<td>' + r3c16 + '</td>' + \
'<td>' + r3c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Donations budget income</td>' + \
'<td>' + r4c1 + '</td>' + \
'<td>' + r4c2 + '</td>' + \
'<td>' + r4c3 + '</td>' + \
'<td>' + r4c4 + '</td>' + \
'<td>' + r4c5 + '</td>' + \
'<td>' + r4c6 + '</td>' + \
'<td>' + r4c7 + '</td>' + \
'<td>' + r4c8 + '</td>' + \
'<td>' + r4c9 + '</td>' + \
'<td>' + r4c10 + '</td>' + \
'<td>' + r4c11 + '</td>' + \
'<td>' + r4c12 + '</td>' + \
'<td>' + r4c13 + '</td>' + \
'<td>' + r4c14 + '</td>' + \
'<td>' + r4c15 + '</td>' + \
'<td>' + r4c16 + '</td>' + \
'<td>' + r4c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Donations actual income</td>' + \
'<td>' + r5c1 + '</td>' + \
'<td>' + r5c2 + '</td>' + \
'<td>' + r5c3 + '</td>' + \
'<td>' + r5c4 + '</td>' + \
'<td>' + r5c5 + '</td>' + \
'<td>' + r5c6 + '</td>' + \
'<td>' + r5c7 + '</td>' + \
'<td>' + r5c8 + '</td>' + \
'<td>' + r5c9 + '</td>' + \
'<td>' + r5c10 + '</td>' + \
'<td>' + r5c11 + '</td>' + \
'<td>' + r5c12 + '</td>' + \
'<td>' + r5c13 + '</td>' + \
'<td>' + r5c14 + '</td>' + \
'<td>' + r5c15 + '</td>' + \
'<td>' + r5c16 + '</td>' + \
'<td>' + r5c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Donations variance income</td>' + \
'<td>' + r6c1 + '</td>' + \
'<td>' + r6c2 + '</td>' + \
'<td>' + r6c3 + '</td>' + \
'<td>' + r6c4 + '</td>' + \
'<td>' + r6c5 + '</td>' + \
'<td>' + r6c6 + '</td>' + \
'<td>' + r6c7 + '</td>' + \
'<td>' + r6c8 + '</td>' + \
'<td>' + r6c9 + '</td>' + \
'<td>' + r6c10 + '</td>' + \
'<td>' + r6c11 + '</td>' + \
'<td>' + r6c12 + '</td>' + \
'<td>' + r6c13 + '</td>' + \
'<td>' + r6c14 + '</td>' + \
'<td>' + r6c15 + '</td>' + \
'<td>' + r6c16 + '</td>' + \
'<td>' + r6c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Grants budget income</td>' + \
'<td>' + r7c1 + '</td>' + \
'<td>' + r7c2 + '</td>' + \
'<td>' + r7c3 + '</td>' + \
'<td>' + r7c4 + '</td>' + \
'<td>' + r7c5 + '</td>' + \
'<td>' + r7c6 + '</td>' + \
'<td>' + r7c7 + '</td>' + \
'<td>' + r7c8 + '</td>' + \
'<td>' + r7c9 + '</td>' + \
'<td>' + r7c10 + '</td>' + \
'<td>' + r7c11 + '</td>' + \
'<td>' + r7c12 + '</td>' + \
'<td>' + r7c13 + '</td>' + \
'<td>' + r7c14 + '</td>' + \
'<td>' + r7c15 + '</td>' + \
'<td>' + r7c16 + '</td>' + \
'<td>' + r7c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Grants actual income</td>' + \
'<td>' + r8c1 + '</td>' + \
'<td>' + r8c2 + '</td>' + \
'<td>' + r8c3 + '</td>' + \
'<td>' + r8c4 + '</td>' + \
'<td>' + r8c5 + '</td>' + \
'<td>' + r8c6 + '</td>' + \
'<td>' + r8c7 + '</td>' + \
'<td>' + r8c8 + '</td>' + \
'<td>' + r8c9 + '</td>' + \
'<td>' + r8c10 + '</td>' + \
'<td>' + r8c11 + '</td>' + \
'<td>' + r8c12 + '</td>' + \
'<td>' + r8c13 + '</td>' + \
'<td>' + r8c14 + '</td>' + \
'<td>' + r8c15 + '</td>' + \
'<td>' + r8c16 + '</td>' + \
'<td>' + r8c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Grants variance income</td>' + \
'<td>' + r9c1 + '</td>' + \
'<td>' + r9c2 + '</td>' + \
'<td>' + r9c3 + '</td>' + \
'<td>' + r9c4 + '</td>' + \
'<td>' + r9c5 + '</td>' + \
'<td>' + r9c6 + '</td>' + \
'<td>' + r9c7 + '</td>' + \
'<td>' + r9c8 + '</td>' + \
'<td>' + r9c9 + '</td>' + \
'<td>' + r9c10 + '</td>' + \
'<td>' + r9c11 + '</td>' + \
'<td>' + r9c12 + '</td>' + \
'<td>' + r9c13 + '</td>' + \
'<td>' + r9c14 + '</td>' + \
'<td>' + r9c15 + '</td>' + \
'<td>' + r9c16 + '</td>' + \
'<td>' + r9c17 + '</td>' + \
'</tr>' + \
'<tr>' + \
'<td>Gifts budget income</td>' + \
'<td>' + r10c1 + '</td>' + \
'<td>' + r10c2 + | |
might not be 'available'.
open_access = self._work(with_open_access_download=True, title="open access")
open_access.quality = 1
self_hosted = self._work(
with_license_pool=True, self_hosted=True, title="self hosted"
)
unlimited_access = self._work(
with_license_pool=True, unlimited_access=True, title="unlimited access"
)
available = self._work(with_license_pool=True, title="available")
[pool] = available.license_pools
pool.licenses_owned = 1
pool.licenses_available = 1
not_available = self._work(with_license_pool=True, title="not available")
[pool] = not_available.license_pools
pool.licenses_owned = 1
pool.licenses_available = 0
not_licensed = self._work(with_license_pool=True, title="not licensed")
[pool] = not_licensed.license_pools
pool.licenses_owned = 0
pool.licenses_available = 0
qu = (
self._db.query(Work)
.join(Work.license_pools)
.join(LicensePool.presentation_edition)
)
for availability, expect in [
(
Facets.AVAILABLE_NOW,
[open_access, available, self_hosted, unlimited_access],
),
(
Facets.AVAILABLE_ALL,
[open_access, available, not_available, self_hosted, unlimited_access],
),
(Facets.AVAILABLE_NOT_NOW, [not_available]),
]:
facets = Facets(self._default_library, None, availability, None)
modified = facets.modify_database_query(self._db, qu)
assert (availability, sorted([x.title for x in modified])) == (
availability,
sorted([x.title for x in expect]),
)
# Setting the 'featured' collection includes only known
# high-quality works.
for collection, expect in [
(
Facets.COLLECTION_FULL,
[open_access, available, self_hosted, unlimited_access],
),
(Facets.COLLECTION_FEATURED, [open_access]),
]:
facets = Facets(
self._default_library, collection, Facets.AVAILABLE_NOW, None
)
modified = facets.modify_database_query(self._db, qu)
assert (collection, sorted([x.title for x in modified])) == (
collection,
sorted([x.title for x in expect]),
)
class TestDefaultSortOrderFacets(DatabaseTest):
    """Tests for DefaultSortOrderFacets subclasses, which override the
    default sort order while leaving the other facet groups untouched.
    """

    def setup_method(self):
        super(TestDefaultSortOrderFacets, self).setup_method()
        self.config = self._default_library

    def _assert_non_order_groups_unchanged(self, cls):
        # The collection and availability facet groups must behave
        # exactly as they do on the plain Facets class.
        unchanged_groups = (
            Facets.COLLECTION_FACET_GROUP_NAME,
            Facets.AVAILABILITY_FACET_GROUP_NAME,
        )
        for group_name in unchanged_groups:
            expected_available = Facets.available_facets(self.config, group_name)
            assert expected_available == cls.available_facets(self.config, group_name)
            expected_default = Facets.default_facet(self.config, group_name)
            assert expected_default == cls.default_facet(self.config, group_name)

    def test_sort_order_rearrangement(self):
        # A DefaultSortOrderFacets subclass that merely promotes an
        # already-supported sort order to be the default.
        class TitleFirst(DefaultSortOrderFacets):
            DEFAULT_SORT_ORDER = Facets.ORDER_TITLE

        # Apart from the sort order, TitleFirst behaves like Facets.
        self._assert_non_order_groups_unchanged(TitleFirst)

        group = Facets.ORDER_FACET_GROUP_NAME
        # The default sort order is now ORDER_TITLE...
        assert TitleFirst.default_facet(self.config, group) == TitleFirst.DEFAULT_SORT_ORDER
        # ...which is not what a plain Facets object would pick.
        assert Facets.default_facet(self.config, group) != TitleFirst.DEFAULT_SORT_ORDER

        # Same *set* of sort orders, but ORDER_TITLE has been moved to
        # the front of the list.
        plain_orders = Facets.available_facets(self.config, group)
        promoted_orders = TitleFirst.available_facets(self.config, group)
        assert set(promoted_orders) == set(plain_orders)
        assert promoted_orders[0] == Facets.ORDER_TITLE
        assert plain_orders[0] != Facets.ORDER_TITLE

    def test_new_sort_order(self):
        # A DefaultSortOrderFacets subclass whose default sort order is
        # not ordinarily supported at all.
        class SeriesFirst(DefaultSortOrderFacets):
            DEFAULT_SORT_ORDER = Facets.ORDER_SERIES_POSITION

        # Apart from the sort order, SeriesFirst behaves like Facets.
        self._assert_non_order_groups_unchanged(SeriesFirst)

        group = Facets.ORDER_FACET_GROUP_NAME
        # The default sort order is the (normally unavailable) series order.
        assert SeriesFirst.default_facet(self.config, group) == SeriesFirst.DEFAULT_SORT_ORDER
        assert Facets.default_facet(self.config, group) != SeriesFirst.DEFAULT_SORT_ORDER

        # The new order is prepended to the standard list of sort orders.
        plain_orders = Facets.available_facets(self.config, group)
        series_orders = SeriesFirst.available_facets(self.config, group)
        assert series_orders == [SeriesFirst.DEFAULT_SORT_ORDER] + plain_orders
class TestDatabaseBackedFacets(DatabaseTest):
    """Tests for DatabaseBackedFacets -- the Facets variant whose sort
    orders must map directly onto database fields.
    """
    def test_available_facets(self):
        """Only database-field-backed sort orders are available."""
        # The only available sort orders are the ones that map
        # directly onto a database field.
        f1 = Facets
        f2 = DatabaseBackedFacets
        # The sort orders available to a DatabaseBackedFacets are a
        # subset of the ones available to a Facets under the same
        # configuration.
        f1_orders = f1.available_facets(
            self._default_library, FacetConstants.ORDER_FACET_GROUP_NAME
        )
        f2_orders = f2.available_facets(
            self._default_library, FacetConstants.ORDER_FACET_GROUP_NAME
        )
        assert len(f2_orders) < len(f1_orders)
        # Every supported order must also appear in the field mapping.
        for order in f2_orders:
            assert order in f1_orders and order in f2.ORDER_FACET_TO_DATABASE_FIELD
        # The rules for collection and availability are the same.
        for group in (
            FacetConstants.COLLECTION_FACET_GROUP_NAME,
            FacetConstants.AVAILABILITY_FACET_GROUP_NAME,
        ):
            assert f1.available_facets(
                self._default_library, group
            ) == f2.available_facets(self._default_library, group)
    def test_default_facets(self):
        """Unsupported configured defaults fall back to a supported order."""
        # If the configured default sort order is not available,
        # DatabaseBackedFacets chooses the first enabled sort order.
        f1 = Facets
        f2 = DatabaseBackedFacets
        # The rules for collection and availability are the same.
        for group in (
            FacetConstants.COLLECTION_FACET_GROUP_NAME,
            FacetConstants.AVAILABILITY_FACET_GROUP_NAME,
        ):
            assert f1.default_facet(self._default_library, group) == f2.default_facet(
                self._default_library, group
            )
        # In this bizarre library, the default sort order is 'time
        # added to collection' -- an order not supported by
        # DatabaseBackedFacets.
        class Mock(object):
            # Stand-in for a library configuration object: exposes the
            # enabled facets and the configured default.
            enabled = [
                FacetConstants.ORDER_ADDED_TO_COLLECTION,
                FacetConstants.ORDER_TITLE,
                FacetConstants.ORDER_AUTHOR,
            ]
            def enabled_facets(self, group_name):
                return self.enabled
            def default_facet(self, group_name):
                return FacetConstants.ORDER_ADDED_TO_COLLECTION
        # A Facets object uses the 'time added to collection' order by
        # default.
        config = Mock()
        assert f1.ORDER_ADDED_TO_COLLECTION == f1.default_facet(
            config, f1.ORDER_FACET_GROUP_NAME
        )
        # A DatabaseBacked Facets can't do that. It finds the first
        # enabled sort order that it can support, and uses it instead.
        assert f2.ORDER_TITLE == f2.default_facet(config, f2.ORDER_FACET_GROUP_NAME)
        # If no enabled sort orders are supported, it just sorts
        # by Work ID, so that there is always _some_ sort order.
        config.enabled = [FacetConstants.ORDER_ADDED_TO_COLLECTION]
        assert f2.ORDER_WORK_ID == f2.default_facet(config, f2.ORDER_FACET_GROUP_NAME)
    def test_order_by(self):
        """Each supported sort order yields the expected ORDER BY clauses."""
        E = Edition
        W = Work
        def order(facet, ascending=None):
            # Build a DatabaseBackedFacets with the given sort order and
            # return the first element of its order_by() output.
            f = DatabaseBackedFacets(
                self._default_library,
                collection=Facets.COLLECTION_FULL,
                availability=Facets.AVAILABLE_ALL,
                order=facet,
                order_ascending=ascending,
            )
            return f.order_by()[0]
        def compare(a, b):
            # Clause objects are compared pairwise via their .compare()
            # method rather than with ==.
            assert len(a) == len(b)
            for i in range(0, len(a)):
                assert a[i].compare(b[i])
        # Ascending author order: author, then title, then work ID.
        expect = [E.sort_author.asc(), E.sort_title.asc(), W.id.asc()]
        actual = order(Facets.ORDER_AUTHOR, True)
        compare(expect, actual)
        # Descending author order only flips the primary key.
        expect = [E.sort_author.desc(), E.sort_title.asc(), W.id.asc()]
        actual = order(Facets.ORDER_AUTHOR, False)
        compare(expect, actual)
        # Title order swaps the first two keys.
        expect = [E.sort_title.asc(), E.sort_author.asc(), W.id.asc()]
        actual = order(Facets.ORDER_TITLE, True)
        compare(expect, actual)
        # Last-update order prepends the update timestamp.
        expect = [
            W.last_update_time.asc(),
            E.sort_author.asc(),
            E.sort_title.asc(),
            W.id.asc(),
        ]
        actual = order(Facets.ORDER_LAST_UPDATE, True)
        compare(expect, actual)
        # Unsupported sort order -> default (author, title, work ID)
        expect = [E.sort_author.asc(), E.sort_title.asc(), W.id.asc()]
        actual = order(Facets.ORDER_ADDED_TO_COLLECTION, True)
        compare(expect, actual)
    def test_modify_database_query(self):
        """Facet settings filter and sort a works query as expected."""
        # Set up works that are matched by different types of collections.
        # A high-quality open-access work.
        open_access_high = self._work(with_open_access_download=True)
        open_access_high.quality = 0.8
        # A low-quality open-access work.
        open_access_low = self._work(with_open_access_download=True)
        open_access_low.quality = 0.2
        # A high-quality licensed work which is not currently available.
        (licensed_e1, licensed_p1) = self._edition(
            data_source_name=DataSource.OVERDRIVE, with_license_pool=True
        )
        licensed_high = self._work(presentation_edition=licensed_e1)
        licensed_high.license_pools.append(licensed_p1)
        licensed_high.quality = 0.8
        licensed_p1.open_access = False
        licensed_p1.licenses_owned = 1
        licensed_p1.licenses_available = 0
        # A low-quality licensed work which is currently available.
        (licensed_e2, licensed_p2) = self._edition(
            data_source_name=DataSource.OVERDRIVE, with_license_pool=True
        )
        licensed_p2.open_access = False
        licensed_low = self._work(presentation_edition=licensed_e2)
        licensed_low.license_pools.append(licensed_p2)
        licensed_low.quality = 0.2
        licensed_p2.licenses_owned = 1
        licensed_p2.licenses_available = 1
        # A high-quality work with unlimited access.
        unlimited_access_high = self._work(
            with_license_pool=True, unlimited_access=True
        )
        unlimited_access_high.quality = 0.8
        qu = DatabaseBackedWorkList.base_query(self._db)
        def facetify(
            collection=Facets.COLLECTION_FULL,
            available=Facets.AVAILABLE_ALL,
            order=Facets.ORDER_TITLE,
        ):
            # Apply a DatabaseBackedFacets with the given settings to
            # the base query and return the modified query.
            f = DatabaseBackedFacets(
                self._default_library, collection, available, order
            )
            return f.modify_database_query(self._db, qu)
        # When holds are allowed, we can find all works by asking
        # for everything.
        library = self._default_library
        library.setting(Library.ALLOW_HOLDS).value = "True"
        everything = facetify()
        assert 5 == everything.count()
        # If we disallow holds, we lose one book even when we ask for
        # everything.
        library.setting(Library.ALLOW_HOLDS).value = "False"
        everything = facetify()
        assert 4 == everything.count()
        assert licensed_high not in everything
        library.setting(Library.ALLOW_HOLDS).value = "True"
        # Even when holds are allowed, if we restrict to books
        # currently available we lose the unavailable book.
        available_now = facetify(available=Facets.AVAILABLE_NOW)
        assert 4 == available_now.count()
        assert licensed_high not in available_now
        # If we restrict to open-access books we lose the two licensed
        # books.
        open_access = facetify(available=Facets.AVAILABLE_OPEN_ACCESS)
        assert 2 == open_access.count()
        assert licensed_high not in open_access
        assert licensed_low not in open_access
        assert unlimited_access_high not in open_access
        # If we restrict to the featured collection we lose the two
        # low-quality books.
        featured_collection = facetify(collection=Facets.COLLECTION_FEATURED)
        assert 3 == featured_collection.count()
        assert open_access_low not in featured_collection
        assert licensed_low not in featured_collection
        # Try some different orderings to verify that order_by()
        # is called and used properly.
        title_order = facetify(order=Facets.ORDER_TITLE)
        assert [
            open_access_high.id,
            open_access_low.id,
            licensed_high.id,
            licensed_low.id,
            unlimited_access_high.id,
        ] == [x.id for x in title_order]
        assert ["sort_title", "sort_author", "id"] == [
            x.name for x in title_order._distinct
        ]
        # This sort order is not supported, so the default is used.
        unsupported_order = facetify(order=Facets.ORDER_ADDED_TO_COLLECTION)
        assert [
            unlimited_access_high.id,
            licensed_low.id,
            licensed_high.id,
            open_access_low.id,
            open_access_high.id,
        ] == [x.id for x in unsupported_order]
        assert ["sort_author", "sort_title", "id"] == [
            x.name for x in unsupported_order._distinct
        ]
class TestFeaturedFacets(DatabaseTest):
def test_constructor(self):
# Verify that constructor arguments are stored.
entrypoint = object()
facets = FeaturedFacets(1, entrypoint, entrypoint_is_default=True)
assert 1 == facets.minimum_featured_quality
assert entrypoint == facets.entrypoint
assert True == facets.entrypoint_is_default
def test_feed_type(self):
# If a grouped feed is built via CachedFeed.fetch, it will be
# filed as a grouped feed.
assert CachedFeed.GROUPS_TYPE == FeaturedFacets.CACHED_FEED_TYPE
def test_default(self):
# Check how FeaturedFacets gets its minimum_featured_quality value.
library1 = self._default_library
library1.setting(Configuration.MINIMUM_FEATURED_QUALITY).value = 0.22
library2 = self._library()
library2.setting(Configuration.MINIMUM_FEATURED_QUALITY).value = 0.99
lane = self._lane(library=library2)
# FeaturedFacets can be instantiated for | |
<gh_stars>0
"""This is the baseclass for list box types"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from builtins import chr
from datetime import datetime, timedelta
from abc import ABCMeta, abstractmethod, abstractproperty
from future.utils import with_metaclass
from asciimatics.event import KeyboardEvent, MouseEvent
from asciimatics.screen import Screen
from asciimatics.widgets.widget import Widget
from asciimatics.widgets.scrollbar import _ScrollBar
from re import match as re_match
from re import compile as re_compile
import os
import unicodedata
from asciimatics.widgets.utilities import _enforce_width
from future.moves.itertools import zip_longest
from asciimatics.utilities import readable_timestamp, readable_mem
class _CustomBaseListBox(with_metaclass(ABCMeta, Widget)):
    """
    An internal base class containing the functionality common to the
    list box widget types (selection, keyboard/mouse handling, search,
    and scroll bar management).
    """
    # __slots__ avoids a per-instance __dict__ for these attributes.
    __slots__ = [
        "_options",
        "_titles",
        "_label",
        "_line",
        "_start_line",
        "_required_height",
        "_on_change",
        "_on_select",
        "_validator",
        "_search",
        "_last_search",
        "_scroll_bar",
        "_parser",
    ]
    def __init__(
        self,
        height,
        options,
        titles=None,
        label=None,
        name=None,
        parser=None,
        on_change=None,
        on_select=None,
        validator=None,
    ):
        """
        :param height: The required number of input lines for this widget.
        :param options: The options for each row in the widget.
        :param titles: Optional list of titles for the columns of the widget.
        :param label: An optional label for the widget.
        :param name: The name for the widget.
        :param parser: Optional parser to colour text.
        :param on_change: Optional function to call when selection changes.
        :param on_select: Optional function to call when the user actually selects an entry from
            this list - e.g. by double-clicking or pressing Enter.
        :param validator: Optional function to validate selection for this widget.
        """
        super(_CustomBaseListBox, self).__init__(name)
        self._titles = titles
        self._label = label
        # Parser must be set before _parse_options, which consults it.
        self._parser = parser
        self._options = self._parse_options(options)
        self._line = 0
        # NOTE(review): _value is not in __slots__ here; storage is
        # presumably provided by the Widget base class -- confirm.
        self._value = None
        self._start_line = 0
        self._required_height = height
        self._on_change = on_change
        self._on_select = on_select
        self._validator = validator
        # Incremental-search state: accumulated string and keypress time.
        self._search = ""
        self._last_search = datetime.now()
        self._scroll_bar = None
    def reset(self):
        """Nothing to reset for this widget."""
        pass
    def process_event(self, event):
        """Handle keyboard and mouse events for the list.

        Supports vim-style j/k/l keys in addition to the standard cursor
        keys, incremental text search on other printable keys, and mouse
        selection including scroll-bar interaction. Returns the event if
        it was not handled here, or None once it has been consumed.
        """
        if isinstance(event, KeyboardEvent):
            if len(self._options) > 0 and event.key_code in [
                ord("k"),
                Screen.KEY_UP,
            ]:
                # Move up one line in text - use value to trigger on_select.
                self._line = max(0, self._line - 1)
                self.value = self._options[self._line][1]
            elif len(self._options) > 0 and event.key_code in [
                ord("j"),
                Screen.KEY_DOWN,
            ]:
                # Move down one line in text - use value to trigger on_select.
                self._line = min(len(self._options) - 1, self._line + 1)
                self.value = self._options[self._line][1]
            elif (
                len(self._options) > 0 and event.key_code == Screen.KEY_PAGE_UP
            ):
                # Move up one page.
                self._line = max(
                    0, self._line - self._h + (1 if self._titles else 0)
                )
                self.value = self._options[self._line][1]
            elif (
                len(self._options) > 0
                and event.key_code == Screen.KEY_PAGE_DOWN
            ):
                # Move down one page.
                self._line = min(
                    len(self._options) - 1,
                    self._line + self._h - (1 if self._titles else 0),
                )
                self.value = self._options[self._line][1]
            elif event.key_code in [
                Screen.ctrl("m"),
                Screen.ctrl("j"),
                ord("l"),
            ]:
                # Fire select callback.
                if self._on_select:
                    self._on_select()
            elif event.key_code > 0:
                # Treat any other normal press as a search.
                # A pause of >= 1s starts a fresh search string.
                now = datetime.now()
                if now - self._last_search >= timedelta(seconds=1):
                    self._search = ""
                self._search += chr(event.key_code)
                self._last_search = now
                # If we find a new match for the search string, update the list selection
                new_value = self._find_option(self._search)
                if new_value is not None:
                    self.value = new_value
            else:
                return event
        elif isinstance(event, MouseEvent):
            # Mouse event - adjust for scroll bar as needed.
            if event.buttons != 0:
                # Check for normal widget.
                if len(self._options) > 0 and self.is_mouse_over(
                    event,
                    include_label=False,
                    width_modifier=1 if self._scroll_bar else 0,
                ):
                    # Figure out selected line
                    new_line = event.y - self._y + self._start_line
                    if self._titles:
                        new_line -= 1
                    new_line = min(new_line, len(self._options) - 1)
                    # Update selection and fire select callback if needed.
                    if new_line >= 0:
                        self._line = new_line
                        self.value = self._options[self._line][1]
                        if (
                            event.buttons & MouseEvent.DOUBLE_CLICK != 0
                            and self._on_select
                        ):
                            self._on_select()
                    return None
                # Check for scroll bar interactions:
                if self._scroll_bar:
                    if self._scroll_bar.process_event(event):
                        return None
            # Ignore other mouse events.
            return event
        else:
            # Ignore other events
            return event
        # If we got here, we processed the event - swallow it.
        return None
    def _add_or_remove_scrollbar(self, width, height, dy):
        """
        Add or remove a scrollbar from this listbox based on height and available options.
        :param width: Width of the Listbox
        :param height: Height of the Listbox.
        :param dy: Vertical offset from top of widget.
        """
        # A scroll bar only exists while there are more options than
        # visible lines; it is created/destroyed lazily here.
        if self._scroll_bar is None and len(self._options) > height:
            self._scroll_bar = _ScrollBar(
                self._frame.canvas,
                self._frame.palette,
                self._x + width - 1,
                self._y + dy,
                height,
                self._get_pos,
                self._set_pos,
            )
        elif self._scroll_bar is not None and len(self._options) <= height:
            self._scroll_bar = None
    def _get_pos(self):
        """
        Get current position for scroll bar, as a fraction in [0, 1].
        """
        if self._h >= len(self._options):
            return 0
        return self._start_line / (len(self._options) - self._h)
    def _set_pos(self, pos):
        """
        Set current position for scroll bar (pos is a fraction in [0, 1]).
        """
        if self._h < len(self._options):
            # Convert the fraction back into a top-line index.
            pos *= len(self._options) - self._h
            pos = int(round(max(0, pos), 0))
            self._start_line = pos
    @abstractmethod
    def _find_option(self, search_value):
        """
        Internal function called by the BaseListBox to do a text search on user input.
        :param search_value: The string value to search for in the list.
        :return: The value of the matching option (or None if nothing matches).
        """
    def required_height(self, offset, width):
        """Return the fixed height requested at construction time."""
        return self._required_height
    @property
    def start_line(self):
        """
        The line that will be drawn at the top of the visible section of this list.
        """
        return self._start_line
    @start_line.setter
    def start_line(self, new_value):
        # Silently ignore out-of-range values.
        if 0 <= new_value < len(self._options):
            self._start_line = new_value
    @property
    def value(self):
        """
        The current value for this list box.
        """
        return self._value
    @value.setter
    def value(self, new_value):
        # Only trigger change notification after we've changed selection
        old_value = self._value
        self._value = new_value
        for i, [_, value] in enumerate(self._options):
            if value == new_value:
                self._line = i
                break
        else:
            # No matching value - pick a default.
            if len(self._options) > 0:
                self._line = 0
                self._value = self._options[self._line][1]
            else:
                self._line = -1
                self._value = None
        if self._validator:
            self._is_valid = self._validator(self._value)
        if old_value != self._value and self._on_change:
            self._on_change()
        # Fix up the start line now that we've explicitly set a new value.
        self._start_line = max(
            0, max(self._line - self._h + 1, min(self._start_line, self._line))
        )
    def _parse_options(self, options):
        """
        Parse the options list for ColouredText.
        :param options: the options list to parse
        :returns: the options list parsed and converted to ColouredText as needed.
        """
        if self._parser:
            parsed_value = []
            for option in options:
                parsed_value.append((self._parse_option(option[0]), option[1]))
            return parsed_value
        return options
    @abstractmethod
    def _parse_option(self, option):
        """
        Parse a single option for ColouredText.
        :param option: the option to parse
        :returns: the option parsed and converted to ColouredText.
        """
    @abstractproperty
    def options(self):
        """
        The list of options available for user selection.
        """
class CustomMultiColumnListBox(_CustomBaseListBox):
"""
A MultiColumnListBox is a widget for displaying tabular data.
It displays a list of related data in columns, from which the user can select a line.
"""
def __init__(
self,
height,
columns,
options,
titles=None,
label=None,
name=None,
add_scroll_bar=False,
parser=None,
on_change=None,
on_select=None,
space_delimiter=" ",
):
"""
:param height: The required number of input lines for this ListBox.
:param columns: A list of widths and alignments for each column.
:param options: The options for each row in the widget.
:param titles: Optional list of titles for each column. Must match the length of
`columns`.
:param label: An optional label for the widget.
:param name: The name for the ListBox.
:param add_scroll_bar: Whether to add optional scrollbar for large lists.
:param parser: Optional parser to colour text.
:param on_change: Optional function to call when selection changes.
:param on_select: Optional function to call when the user actually selects an entry from
:param space_delimiter: Optional parameter to define the delimiter between columns.
The default value is blank space.
The `columns` parameter is a list of integers or strings. If it is an integer, this is
the absolute width of the column in characters. If it is a string, it must be of the
format "[<align>]<width>[%]" where:
* <align> is the alignment string ("<" = left, ">" = right, "^" = centre)
* <width> is the width in characters
* % is an optional qualifier that says the number is a percentage of the width of the
widget.
Column widths need to encompass any space required between columns, so for example, if
your column is 5 characters, allow 6 for an extra space at the end. It is not possible
to do | |
<gh_stars>0
"""Abstractions for the axes of a liquid-handling robot."""
# Standard imports
import logging
from abc import abstractmethod
# Local package imiports
from lhrhost.protocol.linear_actuator import Receiver as LinearActuatorReceiver
from lhrhost.util.containers import add_to_tree, get_from_tree
from lhrhost.util.files import load_from_json, save_to_json
from lhrhost.util.interfaces import InterfaceClass
# External imports
import scipy.stats as stats
# Logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class RobotAxis(LinearActuatorReceiver, metaclass=InterfaceClass):
"""High-level controller mixin interface for axes with physical position units."""
    @property
    @abstractmethod
    def protocol(self):
        """Return the associated linear actuator protocol.

        Concrete axes must return the protocol object used to drive this
        axis; this abstract body is never meant to be executed.
        """
        return None
    @abstractmethod
    def physical_to_sensor(self, physical_position):
        """Convert a position in physical units to a unitless sensor position.

        :param physical_position: position expressed in this axis's
            physical units (see :meth:`physical_unit`).
        :return: the corresponding unitless sensor position.
        """
        pass
    @abstractmethod
    def sensor_to_physical(self, sensor_position):
        """Convert a unitless sensor position to a position in physical units.

        Inverse of :meth:`physical_to_sensor`.

        :param sensor_position: the unitless sensor position to convert.
        :return: the corresponding position in physical units.
        """
        pass
    @property
    @abstractmethod
    def physical_unit(self):
        """Return a string representation of the physical units.

        Used for human-readable reporting of positions on this axis.
        """
        pass
def load_tunings_json(self, json_path=None):
"""Load localized controller tunings from the provided JSON file path.
Default path: 'calibrations/{}_tunings.json' where {} is replaced with
the axis name.
"""
if json_path is None:
json_path = 'calibrations/{}_tunings.json'.format(self.name)
trees = load_from_json(json_path)
self.default_tuning = trees['default']
self.target_position_tunings = trees['target positions']
return trees
def save_tunings_json(self, json_path=None):
"""Save a localized controller tunings tree to the provided JSON file path."""
if json_path is None:
json_path = 'calibrations/{}_tunings.json'.format(self.name)
save_to_json({
'default': self.default_tuning,
'target positions': self.target_position_tunings
}, json_path)
async def go_to_sensor_position(
self, sensor_position, apply_tunings=True, restore_tunings=True
):
"""Go to the specified sensor position.
Returns the final sensor position.
"""
if apply_tunings:
current_tuning = self.default_tuning
for tuning in self.target_position_tunings:
if sensor_position >= tuning['min'] and sensor_position < tuning['max']:
current_tuning = tuning
else:
logger.debug(
'PID tunings for sensor position {} unspecified, using defaults.'
.format(int(sensor_position))
)
kp = current_tuning['pid']['kp']
kd = current_tuning['pid']['kd']
motor_limits = current_tuning['limits']['motor']
duty_forwards_max = motor_limits['forwards']['max']
duty_forwards_min = motor_limits['forwards']['min']
duty_backwards_max = motor_limits['backwards']['max']
duty_backwards_min = motor_limits['backwards']['min']
(prev_kp, prev_kd, prev_ki) = await self.set_pid_gains(kp=kp, kd=kd)
(
prev_duty_forwards_max, prev_duty_forwards_min,
prev_duty_backwards_max, prev_duty_backwards_min
) = await self.set_motor_limits(
forwards_max=duty_forwards_max, forwards_min=duty_forwards_min,
backwards_max=duty_backwards_max, backwards_min=duty_backwards_min
)
await self.protocol.feedback_controller.request_complete(
int(sensor_position)
)
if apply_tunings and restore_tunings:
await self.set_pid_gains(kp=prev_kp, kd=prev_kd, ki=prev_ki)
await self.set_motor_limits(
forwards_max=duty_forwards_max, forwards_min=duty_forwards_min,
backwards_max=duty_backwards_max, backwards_min=duty_backwards_min
)
return self.protocol.position.last_response_payload
async def go_to_low_end_position(self, speed=None):
"""Go to the lowest possible sensor position at the maximum allowed speed.
Speed must be given as a signed motor duty cycle.
"""
if speed is None:
speed = (
self.protocol.feedback_controller.limits.motor
.backwards.high.last_response_payload
)
await self.protocol.motor.request_complete(speed)
await self.protocol.position.request()
return self.protocol.position.last_response_payload
async def go_to_high_end_position(self, speed=None):
"""Go to the highest possible sensor position at the maximum allowed speed.
Speed must be given as a signed motor duty cycle.
"""
if speed is None:
speed = (
self.protocol.feedback_controller.limits.motor
.forwards.high.last_response_payload
)
await self.protocol.motor.request_complete(speed)
await self.protocol.position.request()
return self.protocol.position.last_response_payload
async def go_to_physical_position(self, physical_position):
"""Go to the specified physical position.
Returns the final physical position.
"""
sensor_position = self.physical_to_sensor(physical_position)
sensor_position = await self.go_to_sensor_position(sensor_position)
return self.sensor_to_physical(sensor_position)
async def move_by_sensor_delta(self, sensor_delta):
"""Go forwards/backwards by the specified sensor displacement.
Returns the final physical displacement.
"""
position = await self.sensor_position
target_position = position + sensor_delta
final_position = await self.go_to_sensor_position(target_position)
return final_position - position
async def move_by_physical_delta(self, physical_delta):
"""Go forwards/backwards by the specified physical displacement.
Returns the final physical displacement.
"""
position = await self.physical_position
target_position = position + physical_delta
final_position = await self.go_to_physical_position(target_position)
return final_position - position
async def wait_until_initialized(self):
"""Wait until the axis is ready to control."""
await self.protocol.initialized.wait()
await self.protocol.position.initialized.wait()
await self.protocol.motor.initialized.wait()
async def synchronize_values(self):
"""Request the values of all channels."""
await self.protocol.request_all()
@property
def name(self):
"""Return the name of the axis."""
return self.protocol.node_name
@property
def last_position_limits(self):
"""Get the last received position limits of the axis."""
return (
self.protocol.feedback_controller.limits.position.low.last_response_payload,
self.protocol.feedback_controller.limits.position.high.last_response_payload
)
@property
async def sensor_position(self):
"""Get the current sensor position of the axis."""
await self.protocol.position.request()
return self.last_sensor_position
@property
def last_sensor_position(self):
"""Get the last received sensor position of the axis."""
return self.protocol.position.last_response_payload
@property
async def physical_position(self):
"""Get the current physical position of the axis."""
await self.protocol.position.request()
return self.last_physical_position
@property
def last_physical_position(self):
"""Get the last received physical position of the axis."""
return self.sensor_to_physical(self.last_sensor_position)
async def set_pid_gains(self, kp=None, kd=None, ki=None, floating_point=True):
"""Set values for the PID gains whose values are specified.
Returns the previous values of the gains.
"""
pid_protocol = self.protocol.feedback_controller.pid
prev_kp = pid_protocol.kp.last_response_payload
prev_kd = pid_protocol.kd.last_response_payload
prev_ki = pid_protocol.ki.last_response_payload
if kp is not None and prev_kp != int(kp * 100 if floating_point else kp):
await pid_protocol.kp.request(int(kp * 100 if floating_point else kp))
if kd is not None and prev_kd != int(kd * 100 if floating_point else kp):
await pid_protocol.kd.request(int(kd * 100 if floating_point else kp))
if ki is not None and prev_ki != int(ki * 100 if floating_point else kp):
await pid_protocol.ki.request(int(ki * 100 if floating_point else kp))
return (
prev_kp / 100 if floating_point else prev_kp,
prev_kd / 100 if floating_point else prev_kd,
prev_ki / 100 if floating_point else prev_ki
)
async def set_motor_limits(
self, forwards_max=None, forwards_min=None, backwards_max=None, backwards_min=None
):
"""Set values for the motor duty cycle limits where specified.
Returns the previous values of the limits.
"""
limits_protocol = self.protocol.feedback_controller.limits.motor
prev_forwards_max = limits_protocol.forwards.high.last_response_payload
prev_forwards_min = limits_protocol.forwards.low.last_response_payload
prev_backwards_max = -limits_protocol.backwards.high.last_response_payload
prev_backwards_min = -limits_protocol.backwards.low.last_response_payload
if forwards_max is not None and prev_forwards_max != int(forwards_max):
await limits_protocol.forwards.high.request(int(forwards_max))
if forwards_min is not None and prev_forwards_min != int(forwards_min):
await limits_protocol.forwards.high.request(int(forwards_min))
if backwards_max is not None and prev_backwards_max != int(backwards_max):
await limits_protocol.backwards.high.request(int(-backwards_max))
if backwards_min is not None and prev_backwards_min != int(backwards_min):
await limits_protocol.backwards.high.request(int(-backwards_min))
return (
prev_forwards_max, prev_forwards_min,
prev_backwards_max, prev_backwards_min
)
class ContinuousRobotAxis(RobotAxis):
    """High-level controller mixin interface for axes with continuous positions.

    Assumes a linear transformation exists between sensor and physical positions.
    """

    def __init__(self):
        """Initialize member variables."""
        super().__init__()
        self._calibration_samples = []
        self.linear_regression = None

    def clear_calibration_samples(self):
        """Discard the stored calibration data."""
        self._calibration_samples = []
        self.linear_regression = None

    def add_calibration_sample(self, sensor_position, physical_position):
        """Add a (sensor, physical) position pair for calibration."""
        self.linear_regression = None  # invalidate any previously fitted model
        self._calibration_samples.append((sensor_position, physical_position))

    def fit_calibration_linear(self):
        """Perform a linear regression on the calibration data and store results.

        Returns the regression slope, intercept, R-value, and standard error.
        The regression is for physical_position = slope * sensor_position + intercept.
        """
        linear_regression = stats.linregress(self._calibration_samples)
        # Keep [slope, intercept, rvalue, stderr]; index 3 (p-value) is dropped.
        self.linear_regression = [
            linear_regression[0], linear_regression[1],
            linear_regression[2], linear_regression[4]
        ]
        return self.linear_regression

    @property
    def calibration_data(self):
        """Return a JSON-exportable structure of calibration data."""
        calibration_data = {
            'parameters': {
                'slope': self.linear_regression[0],
                'intercept': self.linear_regression[1],
                # NOTE(review): this stores the r-value, not r squared; the key
                # name is kept for calibration-file compatibility.
                'rsquared': self.linear_regression[2],
                'stderr': self.linear_regression[3]
            },
            'physical unit': self.physical_unit,
            'samples': [
                {
                    'sensor': calibration_sample[0],
                    'physical': calibration_sample[1]
                }
                for calibration_sample in self._calibration_samples
            ]
        }
        return calibration_data

    def load_calibration(self, calibration_data):
        """Load a calibration from the provided calibration data structure."""
        self._calibration_samples = [
            (calibration_sample['sensor'], calibration_sample['physical'])
            for calibration_sample in calibration_data['samples']
        ]
        self.fit_calibration_linear()

    def load_calibration_json(self, json_path=None):
        """Load a calibration from a provided JSON file path.

        Default path: 'calibrations/{}_physical.json' where {} is replaced with
        the axis name.
        """
        if json_path is None:
            json_path = 'calibrations/{}_physical.json'.format(self.name)
        self.load_calibration(load_from_json(json_path))

    def save_calibration_json(self, json_path=None):
        """Save the calibration to the provided JSON file path.

        Default path: 'calibrations/{}_physical.json' where {} is replaced with
        the axis name.
        """
        if json_path is None:
            json_path = 'calibrations/{}_physical.json'.format(self.name)
        save_to_json(self.calibration_data, json_path)

    @property
    def sensor_to_physical_scaling(self):
        """Return the scaling factor from sensor to physical positions."""
        if self.linear_regression is None:
            # Fixed: previously called the nonexistent
            # self._fit_calibration_linear(), raising AttributeError here.
            self.fit_calibration_linear()
        return self.linear_regression[0]

    @property
    def sensor_to_physical_offset(self):
        """Return the post-scaling offset from sensor to physical positions."""
        if self.linear_regression is None:
            # Fixed: same nonexistent-method bug as sensor_to_physical_scaling.
            self.fit_calibration_linear()
        return self.linear_regression[1]

    # Implement RobotAxis

    def physical_to_sensor(self, physical_position):
        """Convert a position in physical units to a unitless sensor position."""
        return (
            (physical_position - self.sensor_to_physical_offset) /
            self.sensor_to_physical_scaling
        )

    def sensor_to_physical(self, sensor_position):
        """Convert a unitless sensor position to a position in physical units."""
        return (
            self.sensor_to_physical_scaling * sensor_position +
            self.sensor_to_physical_offset
        )
class PresetRobotAxis(RobotAxis):
"""High-level controller mixin for axes with preset positions."""
def __init__(self):
"""Initialize member variables."""
super().__init__()
self.preset_sensor_position_tree = {}
self.preset_physical_position_tree = {}
self.current_preset_position = None
def set_preset_sensor_position(self, preset_position, sensor_position):
"""Associate a preset position with a sensor position."""
try:
physical_position = self.preset_to_physical(
preset_position, use_sensor_if_needed=False
)
except (AttributeError, KeyError):
physical_position = None
if physical_position is not None:
raise KeyError(
'Preset position {} is already set to physical position {} {}!'
.format(preset_position, physical_position, self.physical_units)
)
add_to_tree(
self.preset_sensor_position_tree, preset_position,
sensor_position
)
def set_preset_physical_position(self, preset_position, physical_position):
"""Associate a preset position with a physical position."""
try:
sensor_position = self.preset_to_sensor(
preset_position, use_physical_if_needed=False
)
except KeyError:
sensor_position = None
if sensor_position is not None:
raise KeyError(
'Preset position {} is already set to sensor position {}!'
.format(preset_position, sensor_position)
)
add_to_tree(
self.preset_physical_position_tree, preset_position,
physical_position
)
def get_preset_position(self, presets_tree, preset_position):
"""Get an actual position from a | |
from math import *
from matplotlib import pyplot as plt
from matplotlib import colors as col
import numpy as np
import sys,os
def cart_comp(x, y):
    """cmp-style comparator ordering values from largest positive to largest negative.

    Returns a negative, zero, or positive integer so it can be used as an
    old-style sort comparison function (descending order).

    Fixed: the old implementation returned int(y - x), which truncates
    differences smaller than 1 to zero and therefore mis-ordered layer keys
    that are close together.
    """
    return (y > x) - (y < x)
def parse_tracer_file(files, overwrite_old_data=False):
    """Parse iSALE tracer files into a single readable text file.

    For each tracer file, records the header, the values at time 0, and the
    final/maximum values.  Entries are grouped by the tracer's initial y
    position and written largest-positive to largest-negative, separated by
    'new layer' markers, for later use by mk_final_results().

    @param: files -> list of tracer file paths (e.g. .../tracer-0001.txt)
    @param: overwrite_old_data -> if True, replace any existing parsed file
    @writes: TracerResults/tracer-parsed.txt -> parsed output file
    """
    out_dict = {}
    for filename in files:
        out_str = ""
        # NOTE: str.strip removes a *set* of characters from both ends; for the
        # conventional 'tracer-NNNN.txt' names this yields the tracer number.
        tracer_num = filename.split('/')[-1].strip('tracer-').strip('.txt')
        # Fixed: each file is now closed as soon as it is parsed; the old code
        # only closed the last file after the loop (handle leak).
        with open(filename) as tracer_file:
            header = [item for item in tracer_file.readline().split() if item != '(m)']
            header_out = tracer_num + '\n'
            for item in header[0:-1]:
                header_out += item + '\t'
            # The last two column labels may be fused (e.g. 'TrPTrT'); split them.
            try:
                header_out += header[3][0:3] + '\t' + header[3][3:6]
            except IndexError:
                header_out += header[-1]
            out_str += header_out + '\n'
            initial_values = tracer_file.readline().split()
            initial_values_out = 'initial values\t'
            for value in initial_values:
                initial_values_out += value + '\t'
            out_str += initial_values_out + '\n'
            values = []  # fixed: guard against files with no data rows
            if len(initial_values) > 3:
                # Track the running maxima of the two extra variable columns.
                max_v1 = float(initial_values[3])
                max_v2 = float(initial_values[4])
                for i, line in enumerate(tracer_file):
                    values = line.split()
                    try:
                        if values[3].isalpha():
                            print("extra header in file: " + filename + " on line: " + str(i))
                            continue
                    except IndexError:
                        continue
                    if float(values[3]) > max_v1:
                        max_v1 = float(values[3])
                    if float(values[4]) > max_v2:
                        max_v2 = float(values[4])
            else:
                max_v1 = ""
                max_v2 = ""
                for line in tracer_file:
                    values = line.split()  # keep only the last (final) row
            final_values_out = 'final/max values\t'
            for value in values[:3]:
                final_values_out += value + '\t'
            final_values_out += str(max_v1) + '\t' + str(max_v2)
            out_str += final_values_out + '\n'
        # Group entries by initial y position (column 2).
        key = float(initial_values[2])
        if key not in out_dict:
            out_dict[key] = out_str
        else:
            out_dict[key] += out_str
    # Fixed: descending numeric sort replaces the Python-2-only
    # keys.sort(cmp=cart_comp), whose int truncation mis-ordered close keys.
    keys = sorted(out_dict, reverse=True)
    if not os.path.exists("TracerResults/"):
        os.makedirs("TracerResults")
    mode = 'w+' if overwrite_old_data else 'a+'
    with open("TracerResults/tracer-parsed.txt", mode) as output_file:
        for key in keys:
            output_file.write(out_dict[key] + "new layer\n")
def mk_final_results(directory="TracerResults", filename='tracer-parsed.txt', overwrite_old_data=False):
    """Compute pairwise tracer geometry from a parsed iSALE tracer file.

    For each pair of consecutive tracers in the same layer, writes the tracer
    position plus the separation distance (Dis), dip (Dip), and angle (Ang)
    at both the initial and final times, along with any extra saved iSALE
    plot variables for the first tracer of the pair.

    @param: directory -> directory to look for the input file
    @param: filename -> name of the parsed iSALE tracer file
    @param: overwrite_old_data -> if True, replace any existing output file
    @writes: <directory>/tracer-out.txt containing the info mentioned above
    """
    if not directory.endswith('/'):
        directory += '/'
    out_mode = 'w+' if overwrite_old_data else 'a+'
    # Fixed: context managers close both files even on the early 'break' exit.
    with open(directory + filename, 'r') as tracer_out_file, \
            open(directory + 'tracer-out.txt', out_mode) as final_results_file:
        current_tracer = tracer_out_file.readline().strip('\n')
        header = tracer_out_file.readline().split()
        # Fixed: list(map(...)) so the rows support len()/indexing on Python 3
        # (py2 map returned a list); also fixes the 'inital' typo.
        CT_initial_values = list(map(float, tracer_out_file.readline().split()[2:]))
        CT_final_values = list(map(float, tracer_out_file.readline().split()[2:]))
        newlayer = False
        while current_tracer:
            current_tracer_str = ''
            next_tracer = tracer_out_file.readline().strip('\n')
            if next_tracer == 'new layer':
                # Layer boundary: geometry across layers is meaningless, so the
                # next pair's Dis/Dip/Ang are emitted as 'None'.
                next_tracer = tracer_out_file.readline().strip('\n')
                newlayer = True
            tracer_out_file.readline()  # consume the next tracer's header row
            NT_initial_values = list(map(float, tracer_out_file.readline().split()[2:]))
            NT_final_values = list(map(float, tracer_out_file.readline().split()[2:]))
            if (len(CT_initial_values) < 3 or len(CT_final_values) < 3 or
                    len(NT_initial_values) < 3 or len(NT_final_values) < 3):
                break  # short/empty record: end of usable data
            current_tracer_str += current_tracer + "-" + next_tracer + '\n'
            try:
                current_tracer_str += 'Time\tXmark\tYmark\tDis\tDip\tAng\t' + header[3] + '\t' + header[4] + '\n'
            except IndexError:
                current_tracer_str += 'Time\tXmark\tYmark\tDis\tDip\tAng\t' + '\n'
            for CT_values, NT_values in zip(
                    (CT_initial_values, CT_final_values),
                    (NT_initial_values, NT_final_values)):
                if CT_values[0] != NT_values[0]:
                    break  # time steps do not line up; drop this pair
                CT_x = float(CT_values[1])
                CT_y = float(CT_values[2])
                NT_x = float(NT_values[1])
                NT_y = float(NT_values[2])
                a = NT_x - CT_x
                b = NT_y - CT_y
                a_d, b_d = abs(a), abs(b)
                c = sqrt(a ** 2 + b ** 2)
                if a == 0:
                    # Vertical pair. NOTE(review): pi/2 is radians while the
                    # other branch emits degrees -- kept for compatibility.
                    theta, theta_d = pi / 2, pi / 2
                else:
                    theta = -acos((c ** 2 + a ** 2 - b ** 2) / (2 * a * c)) * (180 / pi)
                    theta_d = acos((c ** 2 + a_d ** 2 - b_d ** 2) / (2 * a_d * c)) * (180 / pi)
                    if b < 0:
                        theta = -theta
                if newlayer:
                    c, theta, theta_d = 'None', 'None', 'None'
                    newlayer = False
                try:
                    current_tracer_str += (
                        str(CT_values[0]) + '\t' + str(CT_values[1]) + '\t' +
                        str(CT_values[2]) + '\t' + str(c) + '\t' + str(theta_d) +
                        '\t' + str(theta) + '\t' + str(CT_values[3]) + '\t' +
                        str(CT_values[4]) + '\n')
                except IndexError:
                    current_tracer_str += (
                        str(CT_values[0]) + '\t' + str(CT_values[1]) + '\t' +
                        str(CT_values[2]) + '\t' + str(c) + '\t' + str(theta_d) +
                        '\t' + str(theta) + '\n')
            # Only write complete records (pair name + header + two data rows).
            if current_tracer_str.count("\n") == 4:
                final_results_file.write(current_tracer_str)
            current_tracer = next_tracer
            CT_initial_values = NT_initial_values
            CT_final_values = NT_final_values
def read_tracer_out_file(filepath):
    """Read a tracer-out formatted file into a dictionary of layers.

    Layers are keyed by the (normalized) initial y position; each layer holds
    an 'initial' and a 'final' dictionary mapping variable names to lists of
    string values.  'Xmark_short'/'Ymark_short' collect only the rows where no
    value in the record was 'None'.

    @param: filepath -> path to the tracer-out file to read
    @return: layers -> {layer_key: {'initial': {...}, 'final': {...}}}
    """
    layers = {}
    with open(filepath, 'r') as tracer_out_file:
        line = tracer_out_file.readline()
        while line:
            values = line.split()
            # Fixed: guard against blank lines (values[0] used to raise).
            if values and values[0] == 'Time':
                initial = tracer_out_file.readline().split()
                final = tracer_out_file.readline().split()
                # Fixed: normalize the layer key *before* the lookup; the old
                # code could clobber an existing layer when the raw string
                # ('0.00') normalized onto an existing key ('0.0').
                initial[2] = str(float(initial[2]))
                layer = layers.setdefault(initial[2], {'initial': {}, 'final': {}})
                for when in ('initial', 'final'):
                    layer[when].setdefault('Xmark_short', [])
                    layer[when].setdefault('Ymark_short', [])
                row_ok = 'None' not in initial and 'None' not in final
                for i, name in enumerate(values):
                    layer['initial'].setdefault(name, [])
                    layer['final'].setdefault(name, [])
                    if i >= len(initial) or i >= len(final):
                        # Ragged record (e.g. stray extra header); skip the
                        # trailing columns.  This replaced a leftover
                        # pdb.set_trace() debugging trap.
                        break
                    if initial[i] == 'None' or final[i] == 'None':
                        continue
                    layer['initial'][name].append(initial[i])
                    layer['final'][name].append(final[i])
                    if name == 'Xmark' and row_ok:
                        layer['initial']['Xmark_short'].append(initial[i])
                        layer['final']['Xmark_short'].append(final[i])
                    elif name == 'Ymark' and row_ok:
                        layer['initial']['Ymark_short'].append(initial[i])
                        layer['final']['Ymark_short'].append(final[i])
            line = tracer_out_file.readline()
    return layers
def plot_tracer_data(v,layers_to_plot="All",filepath="TracerResults/tracer-out.txt",title='My_Plot',initial_or_final="fff",size=30,colormap='gist_rainbow',save_plot=False, preform_regression=False, degree_regression=1,display_max_min=False):
layers = read_tracer_out_file(filepath)
try:
legend = []
x_tot,y_tot,z_tot = [],[],[]
keys = map(float, layers.keys())
keys.sort(cmp=cart_comp)
keys = map(str, keys)
x_max,x_min,y_max,y_min,z_max,z_min = 0.,1e9,0.,1e9,0.,1e9
if preform_regression: all_x,all_y = [],[]
if layers_to_plot=="All": layers_to_plot=range(len(keys))
if ('Xmark' in v or 'Ymark' in v) and ('Dip' in v or 'Ang' in v or 'Dis' in v):
f = lambda p: p + '_short' if (p == 'Xmark' or p == 'Ymark') else p
v = map(f,v)
try: v1,v2,v3 = v
except ValueError: v1,v2 = v
x_time,y_time,z_time = None,None,None
if initial_or_final[0] == 'f': x_time = 'final'
elif initial_or_final[0] == 'i': x_time = 'initial'
elif initial_or_final[0] == 'd': x_time = 'difference'
if initial_or_final[1] == 'f': y_time = 'final'
elif initial_or_final[1] == 'i': y_time = 'initial'
elif initial_or_final[1] == 'd': y_time = 'difference'
if len(initial_or_final) > 2 and len(v) > 2 and initial_or_final[2] == 'f': z_time = 'final'
elif len(initial_or_final) > 2 and len(v) > 2 and initial_or_final[2] == 'i': z_time = 'initial'
elif len(initial_or_final) > 2 and len(v) > 2 and initial_or_final[2] == 'd': z_time = 'difference'
if not x_time and (not y_time or not z_time):
print(initial_or_final + " is not a valid time input options are [ff,if,fi,ii,iii,ifi,iff,fii,ffi,fif,fff]")
return
for n in layers_to_plot:
if n > len(keys) or n < -len(keys)-1:
print("n out of range no corrisponding layer, skipping for n = " + str(n))
continue
key = keys[n]
if x_time == 'difference':
l1 = map(float,layers[key]['final'][v1])
l2 = map(float,layers[key]['initial'][v1])
x = [a-b for a,b in zip(l1,l2)]
else: x = map(float,layers[key][x_time][v1])
if y_time == 'difference':
l1 = map(float,layers[key]['final'][v2])
l2 = map(float,layers[key]['initial'][v2])
y = [a-b for a,b in zip(l1,l2)]
else: y = map(float,layers[key][y_time][v2])
if not x or not y: continue
if len(v) > 2:
if z_time == 'difference':
l1 = map(float,layers[key]['final'][v3])
l2 = map(float,layers[key]['initial'][v3])
z = [a-b for a,b in zip(l1,l2)]
else: z = map(float(layers[key][z_time][v3]))
if not z: continue
if max(z) > z_max: z_max = max(z)
if min(z) < z_min: z_min = min(z)
if max(x) > x_max: x_max = max(x)
if min(x) < x_min: x_min = min(x)
if max(y) > y_max: y_max = max(y)
if min(y) < y_min: y_min = min(y)
x_tot += x
y_tot += y
if len(v) > 2:
z_tot += z
else:
z_tot += [int(float(key)) for i in range(len(x))]
plot_handle = plt.scatter(x_tot,y_tot,c=z_tot,s=size,cmap=colormap)
cbar = plt.colorbar()
if x_time == 'difference': v1 = 'delta_' + v1
if y_time == 'difference': v2 = 'delta_' + v2
if len(v) > 2 and z_time == 'difference': | |
<reponame>C6SUMMER/allinclusive-kodi-pi
#############################################################################
#
# Copyright (C) 2013 Navi-X
#
# This file is part of Navi-X.
#
# Navi-X is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Navi-X is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Navi-X. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#############################################################################
#
# CDownloader:
# This class handles file downloads in a background task.
#############################################################################
from string import *
import sys, os.path
import urllib
import urllib2
import re, random, string
import xbmc, xbmcgui, xbmcaddon
import re, os, time, datetime, traceback
import shutil
import zipfile
import threading
import ftplib
import os
import socket
import time
from settings import *
from CPlayList import *
from CDialogBrowse import *
from CURLLoader import *
from libs2 import *
# Detect whether we are running under the XBMC GUI emulator; default to False
# when the running xbmcgui build does not expose the Emulating attribute.
try:
    Emulating = xbmcgui.Emulating
except Exception:  # narrowed from a bare except (no longer eats KeyboardInterrupt)
    Emulating = False
######################################################################
# Description: See comments in class body
######################################################################
#class myURLOpener(urllib.FancyURLopener):
# """Create sub-class in order to overide error 206. This error means a
# partial file is being sent,
# which is ok in this case. Do nothing with this error.
# """
# def http_error_206(self, url, fp, errcode, errmsg, headers, data=None):
# pass
######################################################################
# Description: File downloader including progress bar.
######################################################################
class CDownLoader(threading.Thread):
def __init__(self, *args, **kwargs):
if (kwargs.has_key('window')):
self.MainWindow = kwargs['window']
if (kwargs.has_key('playlist_src')):
self.playlist_src = kwargs['playlist_src']
if (kwargs.has_key('playlist_inc')):
self.playlist_inc = kwargs['playlist_inc']
if (kwargs.has_key('playlist_dst')):
self.playlist_dst = kwargs['playlist_dst']
threading.Thread.__init__(self)
self.setDaemon(True) #make a daemon thread
self.killed = False #not killed
self.running = False #at startup downloader is not running
self.shutdown = False #shutdown after all files downloaded
def run(self):
while self.killed == False:
time.sleep(1.0) #delay 1 second
#check if there are files in the download queue.
while (self.killed == False) and (self.running == True) and (self.playlist_src.size() > 0):
#there are files to be downloaded.
self.download_queue()
def download_start(self, shutdown = False):
self.shutdown = shutdown
self.running = True
def download_stop(self):
self.running = False
def download_isrunning(self):
return self.running
def kill(self):
self.killed = True
# def notify(self):
# self.event.set()
######################################################################
# Description: Downloads a URL to local disk
# Parameters : entry = media item
# Return : self.state (0=success, -1=failure, -2=cancel)
# self.dir (the new selected download dir)
# self.localfile (the destination path+file)
######################################################################
    def browse(self, entry, dir=myDownloadsDir):
        """Ask the user for a download destination for *entry*.

        Determines the remote file's extension and size, proposes a sanitized
        local file name, and shows the browse dialog.

        Results are stored on self:
            self.state     -- 0=success, -1=failure, -2=cancel
            self.dir       -- the selected download directory
            self.localfile -- the destination path + file name
        """
        self.state = 0 #success
        self.dir = ''
        self.processed=False
        URL=entry.URL
        # Only http/ftp URLs can be downloaded.
        if (URL[:4] != 'http') and (URL[:3] != 'ftp'):
            self.state = -1 #URL does not point to internet file.
            return
        # Hoster URLs (icefilms/megaupload/megavideo) skip the size probe.
        if re.search('^http://(\w+\.)?(icefilms|mega(upload|video))\.', URL):
            size_check_skip=True
        else:
            size_check_skip=False
        if size_check_skip:
            print("Mega URL; skipping size check")
            size=0
            urlopener = CURLLoader()
            result = urlopener.geturl_processor(entry)
            # geturl_processor may rewrite entry.URL to the resolved location.
            URL=entry.URL
            loc_url=URL
            self.processed=entry.processed
            self.loc_url=URL
            url_stripped = re.sub('\?.*$', '', loc_url) # strip GET-method args
            url_stripped = re.sub('\|.*$', '', url_stripped) # strip header info if any
            # find extension
            match = re.search('(\.\w+)$',url_stripped)
            if match is None:
                #ext = ""
                ext = getFileExtension(loc_url)
                if ext != '':
                    ext = '.' + ext
            else:
                ext = match.group(1)
        else:
            # Probe the remote file for its extension and size.
            ext, size = self.read_file_info(entry)
            url_stripped = re.sub('\?.*$', '', entry.URL) # strip GET-method args
            # NOTE(review): the next line restarts from entry.URL, discarding
            # the '?'-stripping above -- looks unintentional; verify.
            url_stripped = re.sub('\&.*$', '', entry.URL) # strip odd GET-method args
            url_stripped = re.sub('\|.*$', '', url_stripped) # strip header info if any
        if self.state != 0:
            return
        # For the local file name we use the playlist item 'name' field.
        # But this string may contain invalid characters. Therefore
        # we strip these invalid characters. We also limit the file
        # name length to 42 which is the XBMC XBOX limit.
        if re.search('^Source #', entry.name):
            # Generic source names: reuse the file name from the URL itself.
            localfile=url_stripped[url_stripped.rindex("/")+1:]
        else:
            localfile = re.sub('[^\w\s-]', '', entry.name) # remove characters which are not a letter, digit, white-space, underscore, or dash
            localfile = re.sub('\s+', ' ', localfile) # convert all instances of multiple spaces to single spaces
            localfile = localfile[:(42-len(ext))] # limit to 42 characters.
            localfile = localfile + ext
        if size_check_skip:
            heading="Download File"
        else:
            size_string, raw_size = self.file_size(size,'')
            heading = "Download File: (Size = %s)" % size_string
        # Playlist items must end in .plx so they are recognized later.
        if (entry.type=='playlist') and (localfile.lower().endswith('.plx')==False):
            localfile+='.plx'
        #browsewnd = CDialogBrowse("CBrowseskin.xml", os.getcwd())
        curdir = addon.getAddonInfo('path')
        browsewnd = CDialogBrowse("CBrowseskin2.xml", curdir)
        browsewnd.SetFile(dir, localfile, 3, heading)
        browsewnd.doModal()
        if browsewnd.state != 0:
            self.state = -2 #cancel download
            return
        self.localfile = browsewnd.dir + browsewnd.filename
        self.dir = browsewnd.dir
        #Check if the file already exists
        if os.path.exists(self.localfile):
            dialog = xbmcgui.Dialog()
            if dialog.yesno("Message", "The destination file already exists, continue?") == False:
                self.state = -2 #cancel download
        #end of function.
######################################################################
# Description: Retrieve the file extenstion and size of a URL
# Parameters : entry = mediaitem.
# Return : the file extension (ext) and file size (size)
######################################################################
    def read_file_info(self, entry):
        """Retrieve the file extension and size of a URL.

        For FTP URLs only the extension is derived (size stays 0); for HTTP
        URLs the URL is resolved via CURLLoader, the size is read from the
        response headers, and the extension is guessed from the resolved URL
        (with special cases for youtube/flyupload).  Sets self.state = -1
        when the URL cannot be opened.

        @param: entry = mediaitem
        @return: (ext, size) -- extension including the leading dot, and the
                 size in bytes (0 when unknown)
        """
        self.state = 0 #success
        ext='' #no extension
        size = 0
        try:
            URL, headers = parse_headers(entry.URL)
            if URL[:3] == 'ftp':
                #FTP
                ext = getFileExtension(URL)
                if ext != '':
                    ext = '.' + ext
            else:
                #HTTP
                urlopener = CURLLoader()
                result = urlopener.urlopen(URL, entry);
                if result["code"] != 0:
                    self.state = -1; print('URL does not point to internet file.')
                    return ext, size
                loc_url = urlopener.loc_url#; print('line223 loc_url= ' +str(loc_url))
                self.processed=urlopener.processed#; print('self.processed= ' +str(self.processed))
                #Now we try to open the URL. If it does not exist an error is
                #returned.
                try:
                    #headers = { 'User-Agent' : 'Mozilla/4.0 (compatible;MSIE 7.0;Windows NT 6.0)'}
                    req = urllib2.Request(loc_url, None, headers)
                    # file_size() reads the Content-Length from the response.
                    size_string,size_raw = self.file_size(0,req)
                    size = int(size_raw)
                except Exception, e: size = 0; print('ERROR line 237' +str(e))
                #loc_url=f.geturl()
                try:
                    #special handing for some URL's
                    pos = URL.find('http://www.youtube.com') #find last 'http' in the URL
                    if pos != -1:
                        ext='.mp4'
                    else:
                        #todo: deprecated
                        pos = URL.find("flyupload.com")
                        if pos != -1:
                            ext='.avi'
                        else:
                            #extract the file extension
                            url_stripped = re.sub('\?.*$', '', loc_url) # strip GET-method args
                            re_ext = re.compile('(\.\w+)$') # find extension
                            match = re_ext.search(url_stripped)
                            if match is None:
                                #ext = ""
                                ext = getFileExtension(loc_url)
                                if ext != '':
                                    ext = '.' + ext
                            else:
                                ext = match.group(1)
                except Exception, e: print('ERROR line 261','e =' +str(e))
                # processed youtube URL
                #the code below is failing. Do we still need it?
                # match=re.search('youtube\.com/.*?&itag=(\d+)', loc_url)
                # if match:
                #     fmt=int(match.group(1))
                #     if [5,6,34,35].index(fmt) >= 0:
                #         ext='.flv'
                #     elif [43,44,45,46,100,101,46,102].index(fmt) >= 0:
                #         ext='.webm'
                #     else:
                #         ext='.mp4' # [18,22,37,38,83,82,85,84] - default to instead of testing for
            # safety net: implausibly long "extensions" fall back to .avi
            if len(ext)>6:
                ext='.avi'
        except Exception,e:
            print '\t\t\t Error CDL 278 ' + str(e)
        return ext, size
######################################################################
# Description: Adds an item to the local playlists: queue, incomplete downloads,
# or completed downloads. while removing duplicate entries
# Parameters : URL=source
# Return : -
######################################################################
def add_list(self, entry,item_list):
#if item_list == 'incdl': loc_list = RootDir + incomplete_downloads; playlist = self.playlist_inc
#elif item_list== 'cmpdl': loc_list = RootDir + downloads_complete; playlist = self.playlist_dst
#else: item_list ='queue'; loc_list = RootDir + downloads_queue; playlist = self.playlist_src
if item_list == 'incdl': loc_list = datapaths + incomplete_downloads; playlist = self.playlist_inc
elif item_list== 'cmpdl': loc_list = datapaths + downloads_complete; playlist = self.playlist_dst
else: item_list ='queue'; loc_list = datapaths + downloads_queue; playlist = self.playlist_src
self.state = 0 #success
tmp = CMediaItem() #create new item
tmp.type = entry.type
tmp.name = entry.name
tmp.thumb = entry.thumb
tmp.URL = entry.URL
tmp.DLloc = entry.DLloc
tmp.player = entry.player
tmp.processor = entry.processor
tmp.background = entry.background
#### remove duplicates from list then add new item
pos = 0
for line in open(loc_list,'r'):
if line == '#\n' : pos+=1
elif entry.DLloc in line: playlist.remove(pos-1)
playlist.save(loc_list)
playlist.add(tmp); playlist.save(loc_list)
######################################################################
# Description: Downloads a URL to local disk
# Parameters : shutdown = true if auto shutdown after download.
# Return : -
######################################################################
def download_queue(self, shutdown = False):
self.state = 0 #success
counter = 0
self.MainWindow.download_logo.setVisible(1)
self.MainWindow.dlinfotekst.setVisible(1)
while (self.state != -2) and (self.playlist_src.size() > 0) and (self.killed == False) and (self.running == True):
header = str(counter+1) + " of " + str(self.playlist_src.size()+counter)
self.download_file(self.playlist_src.list[0], header) #download single file
if self.state == | |
# Generated by Snowball 2.0.0 - https://snowballstem.org/
from .basestemmer import BaseStemmer
from .among import Among
class EnglishStemmer(BaseStemmer):
'''
This class implements the stemming algorithm defined by a snowball script.
Generated by Snowball 2.0.0 - https://snowballstem.org/
'''
a_0 = [
Among(u"arsen", -1, -1),
Among(u"commun", -1, -1),
Among(u"gener", -1, -1)
]
a_1 = [
Among(u"'", -1, 1),
Among(u"'s'", 0, 1),
Among(u"'s", -1, 1)
]
a_2 = [
Among(u"ied", -1, 2),
Among(u"s", -1, 3),
Among(u"ies", 1, 2),
Among(u"sses", 1, 1),
Among(u"ss", 1, -1),
Among(u"us", 1, -1)
]
a_3 = [
Among(u"", -1, 3),
Among(u"bb", 0, 2),
Among(u"dd", 0, 2),
Among(u"ff", 0, 2),
Among(u"gg", 0, 2),
Among(u"bl", 0, 1),
Among(u"mm", 0, 2),
Among(u"nn", 0, 2),
Among(u"pp", 0, 2),
Among(u"rr", 0, 2),
Among(u"at", 0, 1),
Among(u"tt", 0, 2),
Among(u"iz", 0, 1)
]
a_4 = [
Among(u"ed", -1, 2),
Among(u"eed", 0, 1),
Among(u"ing", -1, 2),
Among(u"edly", -1, 2),
Among(u"eedly", 3, 1),
Among(u"ingly", -1, 2)
]
a_5 = [
Among(u"anci", -1, 3),
Among(u"enci", -1, 2),
Among(u"ogi", -1, 13),
Among(u"li", -1, 15),
Among(u"bli", 3, 12),
Among(u"abli", 4, 4),
Among(u"alli", 3, 8),
Among(u"fulli", 3, 9),
Among(u"lessli", 3, 14),
Among(u"ousli", 3, 10),
Among(u"entli", 3, 5),
Among(u"aliti", -1, 8),
Among(u"biliti", -1, 12),
Among(u"iviti", -1, 11),
Among(u"tional", -1, 1),
Among(u"ational", 14, 7),
Among(u"alism", -1, 8),
Among(u"ation", -1, 7),
Among(u"ization", 17, 6),
Among(u"izer", -1, 6),
Among(u"ator", -1, 7),
Among(u"iveness", -1, 11),
Among(u"fulness", -1, 9),
Among(u"ousness", -1, 10)
]
a_6 = [
Among(u"icate", -1, 4),
Among(u"ative", -1, 6),
Among(u"alize", -1, 3),
Among(u"iciti", -1, 4),
Among(u"ical", -1, 4),
Among(u"tional", -1, 1),
Among(u"ational", 5, 2),
Among(u"ful", -1, 5),
Among(u"ness", -1, 5)
]
a_7 = [
Among(u"ic", -1, 1),
Among(u"ance", -1, 1),
Among(u"ence", -1, 1),
Among(u"able", -1, 1),
Among(u"ible", -1, 1),
Among(u"ate", -1, 1),
Among(u"ive", -1, 1),
Among(u"ize", -1, 1),
Among(u"iti", -1, 1),
Among(u"al", -1, 1),
Among(u"ism", -1, 1),
Among(u"ion", -1, 2),
Among(u"er", -1, 1),
Among(u"ous", -1, 1),
Among(u"ant", -1, 1),
Among(u"ent", -1, 1),
Among(u"ment", 15, 1),
Among(u"ement", 16, 1)
]
a_8 = [
Among(u"e", -1, 1),
Among(u"l", -1, 2)
]
a_9 = [
Among(u"succeed", -1, -1),
Among(u"proceed", -1, -1),
Among(u"exceed", -1, -1),
Among(u"canning", -1, -1),
Among(u"inning", -1, -1),
Among(u"earring", -1, -1),
Among(u"herring", -1, -1),
Among(u"outing", -1, -1)
]
a_10 = [
Among(u"andes", -1, -1),
Among(u"atlas", -1, -1),
Among(u"bias", -1, -1),
Among(u"cosmos", -1, -1),
Among(u"dying", -1, 3),
Among(u"early", -1, 9),
Among(u"gently", -1, 7),
Among(u"howe", -1, -1),
Among(u"idly", -1, 6),
Among(u"lying", -1, 4),
Among(u"news", -1, -1),
Among(u"only", -1, 10),
Among(u"singly", -1, 11),
Among(u"skies", -1, 2),
Among(u"skis", -1, 1),
Among(u"sky", -1, -1),
Among(u"tying", -1, 5),
Among(u"ugly", -1, 8)
]
g_v = [17, 65, 16, 1]
g_v_WXY = [1, 17, 65, 208, 1]
g_valid_LI = [55, 141, 2]
B_Y_found = False
I_p2 = 0
I_p1 = 0
    def __r_prelude(self):
        """Pre-pass over the word: strip a single leading apostrophe and
        rewrite each 'y' that follows a vowel (or starts the word) as the
        marker letter 'Y', setting self.B_Y_found when any rewrite happened.

        NOTE(review): lab0..lab5 are control-flow exception classes used by
        the Snowball-generated code; they are defined outside this chunk -
        confirm they are in scope.  Generated code: do not hand-edit.
        """
        # (, line 25
        # unset Y_found, line 26
        self.B_Y_found = False
        # do, line 27
        v_1 = self.cursor
        try:
            # (, line 27
            # [, line 27
            self.bra = self.cursor
            # literal, line 27
            if not self.eq_s(u"'"):
                raise lab0()
            # ], line 27
            self.ket = self.cursor
            # delete, line 27
            if not self.slice_del():
                return False
        except lab0: pass
        self.cursor = v_1
        # do, line 28
        v_2 = self.cursor
        try:
            # (, line 28
            # [, line 28
            self.bra = self.cursor
            # literal, line 28
            if not self.eq_s(u"y"):
                raise lab1()
            # ], line 28
            self.ket = self.cursor
            # <-, line 28
            if not self.slice_from(u"Y"):
                return False
            # set Y_found, line 28
            self.B_Y_found = True
        except lab1: pass
        self.cursor = v_2
        # do, line 29: scan the rest of the word, rewriting vowel+'y' pairs
        v_3 = self.cursor
        try:
            # repeat, line 29
            while True:
                v_4 = self.cursor
                try:
                    # (, line 29
                    # goto, line 29
                    try:
                        while True:
                            v_5 = self.cursor
                            try:
                                # (, line 29
                                if not self.in_grouping(EnglishStemmer.g_v, 97, 121):
                                    raise lab5()
                                # [, line 29
                                self.bra = self.cursor
                                # literal, line 29
                                if not self.eq_s(u"y"):
                                    raise lab5()
                                # ], line 29
                                self.ket = self.cursor
                                self.cursor = v_5
                                raise lab4()
                            except lab5: pass
                            self.cursor = v_5
                            if self.cursor >= self.limit:
                                raise lab3()
                            self.cursor += 1
                    except lab4: pass
                    # <-, line 29
                    if not self.slice_from(u"Y"):
                        return False
                    # set Y_found, line 29
                    self.B_Y_found = True
                    continue
                except lab3: pass
                self.cursor = v_4
                break
        except lab2: pass
        self.cursor = v_3
        return True
    def __r_mark_regions(self):
        """Compute the Snowball regions, storing their start offsets in
        self.I_p1 and self.I_p2 (both default to the word end).

        Words beginning with one of the a_0 prefixes get p1 placed directly
        after that prefix; otherwise p1 follows the first vowel/non-vowel
        pair and p2 follows the next such pair.  Generated code: do not
        hand-edit.
        """
        # (, line 32
        self.I_p1 = self.limit
        self.I_p2 = self.limit
        # do, line 35
        v_1 = self.cursor
        try:
            # (, line 35
            # or, line 41
            try:
                v_2 = self.cursor
                try:
                    # among, line 36: exceptional prefixes (a_0)
                    if self.find_among(EnglishStemmer.a_0) == 0:
                        raise lab2()
                    raise lab1()
                except lab2: pass
                self.cursor = v_2
                # (, line 41
                # gopast grouping v, line 41
                if not self.go_out_grouping(EnglishStemmer.g_v, 97, 121):
                    raise lab0()
                self.cursor += 1
                # gopast non v, line 41
                if not self.go_in_grouping(EnglishStemmer.g_v, 97, 121):
                    raise lab0()
                self.cursor += 1
            except lab1: pass
            # setmark p1, line 42
            self.I_p1 = self.cursor
            # gopast grouping v, line 43
            if not self.go_out_grouping(EnglishStemmer.g_v, 97, 121):
                raise lab0()
            self.cursor += 1
            # gopast non v, line 43
            if not self.go_in_grouping(EnglishStemmer.g_v, 97, 121):
                raise lab0()
            self.cursor += 1
            # setmark p2, line 43
            self.I_p2 = self.cursor
        except lab0: pass
        self.cursor = v_1
        return True
    def __r_shortv(self):
        """Backward test for a 'short vowel' ending: either
        non-vowel(g_v_WXY) + vowel + non-vowel, or non-vowel + vowel at the
        very start of the word (cursor at limit_backward).

        Generated code: do not hand-edit.
        """
        # (, line 49
        # or, line 51
        try:
            v_1 = self.limit - self.cursor
            try:
                # (, line 50: ...non-vowel, vowel, non-vowel(WXY variant)
                if not self.out_grouping_b(EnglishStemmer.g_v_WXY, 89, 121):
                    raise lab1()
                if not self.in_grouping_b(EnglishStemmer.g_v, 97, 121):
                    raise lab1()
                if not self.out_grouping_b(EnglishStemmer.g_v, 97, 121):
                    raise lab1()
                raise lab0()
            except lab1: pass
            self.cursor = self.limit - v_1
            # (, line 52: word-initial non-vowel + vowel
            if not self.out_grouping_b(EnglishStemmer.g_v, 97, 121):
                return False
            if not self.in_grouping_b(EnglishStemmer.g_v, 97, 121):
                return False
            # atlimit, line 52
            if self.cursor > self.limit_backward:
                return False
        except lab0: pass
        return True
def __r_R1(self):
if not self.I_p1 <= self.cursor:
return False
return True
def __r_R2(self):
if not self.I_p2 <= self.cursor:
return False
return True
    def __r_Step_1a(self):
        """Step 1a: optionally strip a possessive-apostrophe suffix (a_1),
        then normalise plural endings per a_2
        (sses -> ss; ies/ied -> ie or i; a trailing s is dropped after a
        vowel).  Generated code: do not hand-edit.
        """
        # (, line 58
        # try, line 59
        v_1 = self.limit - self.cursor
        try:
            # (, line 59
            # [, line 60
            self.ket = self.cursor
            # substring, line 60
            if self.find_among_b(EnglishStemmer.a_1) == 0:
                self.cursor = self.limit - v_1
                raise lab0()
            # ], line 60
            self.bra = self.cursor
            # (, line 62
            # delete, line 62
            if not self.slice_del():
                return False
        except lab0: pass
        # [, line 65
        self.ket = self.cursor
        # substring, line 65
        among_var = self.find_among_b(EnglishStemmer.a_2)
        if among_var == 0:
            return False
        # ], line 65
        self.bra = self.cursor
        if among_var == 1:
            # (, line 66
            # <-, line 66
            if not self.slice_from(u"ss"):
                return False
        elif among_var == 2:
            # (, line 68
            # or, line 68
            try:
                v_2 = self.limit - self.cursor
                try:
                    # (, line 68
                    # hop, line 68: need at least 2 more characters
                    c = self.cursor - 2
                    if self.limit_backward > c or c > self.limit:
                        raise lab2()
                    self.cursor = c
                    # <-, line 68
                    if not self.slice_from(u"i"):
                        return False
                    raise lab1()
                except lab2: pass
                self.cursor = self.limit - v_2
                # <-, line 68
                if not self.slice_from(u"ie"):
                    return False
            except lab1: pass
        elif among_var == 3:
            # (, line 69
            # next, line 69
            if self.cursor <= self.limit_backward:
                return False
            self.cursor -= 1
            # gopast grouping v, line 69
            if not self.go_out_grouping_b(EnglishStemmer.g_v, 97, 121):
                return False
            self.cursor -= 1
            # delete, line 69
            if not self.slice_del():
                return False
        return True
def __r_Step_1b(self):
# (, line 74
# [, line 75
self.ket = self.cursor
# substring, line 75
among_var = self.find_among_b(EnglishStemmer.a_4)
if among_var == 0:
return False
# ], line 75
self.bra = self.cursor
if among_var == 1:
# (, line 77
# call R1, line 77
if not self.__r_R1():
return False
# <-, line 77
if not self.slice_from(u"ee"):
return False
elif among_var == 2:
# (, line 79
# test, line 80
v_1 = self.limit - self.cursor
# gopast grouping v, line 80
if not self.go_out_grouping_b(EnglishStemmer.g_v, 97, 121):
return False
self.cursor -= 1
self.cursor = self.limit - v_1
# delete, line 80
if not self.slice_del():
return False
# test, line 81
v_2 = self.limit - | |
"""
This module provides classes that support observers, smart value handling and debug functions
All changes to values nominate an agent, and observers nominate the agent making changes they
are interested in.
It supersedes the pvars module
"""
import logging, sys, threading, pathlib, math, json
from enum import Enum, auto as enumauto, Flag
class loglvls(Enum):
    """
    A class for logging levels so data is self identifying.

    Values mirror the stdlib logging levels, with VAST one step below DEBUG
    and NONE meaning 'no level'.
    """
    VAST = logging.DEBUG-1
    DEBUG = logging.DEBUG
    INFO = logging.INFO
    WARN = logging.WARN
    ERROR = logging.ERROR
    FATAL = logging.FATAL
    NONE = 0
class myagents(Flag):
    """Identifies which agent made a change - the app itself or the user."""
    NONE = 0
    app = enumauto()
    user = enumauto()
class wflags(Flag):
    """Extra state flags carried by a watchable (see watchable.flags)."""
    NONE = 0
    DISABLED = enumauto()
class watchable():
    """
    provides a 'smart' object that provides basic observer functionality around an object.

    Changes to the value can be policed (via validValue), and updates have to provide an
    agent that is performing the update. Observers can then request to be notified when
    the value is changed by specific agents.
    """
    def __init__(self, value, app, flags=wflags.NONE, loglevel=loglvls.INFO):
        """
        creates a new watchable. Initialises the internal value and sets an empty observers list

        value   : the initial value for the object. Not validated!
        app     : the app instance for this. Used for logging and for validating agents
        flags   : a wflags value carrying extra state (e.g. DISABLED)
        loglevel: minimum loglvls level this object forwards to app.log
        """
        self._val=value
        self.app=app
        self.observers=None             # lazily built {agent: [callback, ...]}
        self.oblock=threading.Lock()    # guards all access to self.observers
        self.flags=flags
        self.loglevel=loglevel
        self.log(loglvls.DEBUG, 'watchable type %s setup with value %s' % (type(self).__name__, self._val))

    def setValue(self, value, agent):
        """
        Updates the value of a watchable, or its loglevel / flags.

        if not a loglevel / flags update, this validates and converts (if relevant) the
        requested value. If the value is valid and different from the current value,
        checks for and calls any observers interested in changes by the given agent.

        returns True if the underlying value changed, False otherwise
        """
        if isinstance(value, loglvls):
            self.loglevel = value
            return False
        if isinstance(value, wflags):
            self.flags=value
            return False
        # BUGFIX: the assert message used to report `value`; it is `agent`
        # that is being type-checked here.
        assert isinstance(agent, self.app.agentclass), 'unexpected agent %s of type %s in setValue' % (agent, type(agent).__name__)
        newvalue=self.validValue(value, agent)
        if newvalue != self._val:
            self.notify(newvalue, agent)
            return True
        else:
            self.log(loglvls.DEBUG,'value unchanged (%s)' % self._val)
            return False

    def getValue(self):
        """returns the current (canonical) value"""
        return self._val

    def validValue(self, value, agent=None):
        """
        validates the given value and returns the canonical value which will be stored.
        Raise an exception if the value is invalid
        'Real' classes must implement this
        """
        raise NotImplementedError()

    def notify(self, newvalue, agent):
        """
        stores newvalue and calls any observers registered for the given agent.

        The callback list is copied under the lock so callbacks run without
        holding it (a callback may itself add or drop observers).
        """
        if self.observers:
            clist=None
            with self.oblock:
                if agent in self.observers:
                    clist=self.observers[agent].copy()
            oldvalue=self._val
            self._val=newvalue
            if clist:
                for ob in clist:
                    ob(oldValue=oldvalue, newValue=newvalue, agent=agent, watched=self)
                self.log(loglvls.DEBUG,'value changed (%s)- observers called' % self._val)
        else:
            self._val=newvalue
            self.log(loglvls.DEBUG,'value changed (%s)- no observers' % self._val)

    def addNotify(self, callback, agent):
        """
        registers a callback, invoked when this value is changed by `agent`.

        callback: called as callback(oldValue=..., newValue=..., agent=..., watched=self)
        agent   : the agent whose changes the caller wants to observe
        """
        assert callable(callback)
        assert isinstance(agent, self.app.agentclass)
        self.log(loglvls.DEBUG,'added watcher %s' % callback.__name__)
        with self.oblock:
            if self.observers is None:
                self.observers={agent:[callback]}
            elif agent in self.observers:
                self.observers[agent].append(callback)
            else:
                self.observers[agent]=[callback]

    def dropNotify(self, callback, agent):
        """
        removes a callback previously registered with addNotify.

        raises KeyError if nothing is registered for the agent, and ValueError
        if the callback is not registered for that agent.
        """
        with self.oblock:
            # BUGFIX: previously raised an opaque TypeError when called before
            # any observer was registered (self.observers still None).
            if not self.observers or agent not in self.observers:
                raise KeyError('no observers registered for agent %s' % (agent,))
            self.observers[agent].remove(callback)

    def log(self, loglevel, *args, **kwargs):
        """
        request a logging operation. This does nothing if the given loglevel is < the loglevel set in the object
        """
        if loglevel.value >= self.loglevel.value:
            self.app.log(loglevel, *args, **kwargs)
class textWatch(watchable):
    """
    A watchable whose canonical value is always a text string.
    """
    def validValue(self, value, agent):
        """
        value : the requested new value, anything str() accepts; None is rejected
        agent : who asked for the change (ignored here)
        returns: the valid new value (always a str)
        raises : ValueError for None, plus anything str() itself raises
        """
        if value is not None:
            return str(value)
        raise ValueError('None is not a valid textVar value')
class floatWatch(watchable):
    """
    A refinement of watchable that restricts the value to numbers - simple floating point.
    """
    def __init__(self, *, maxv=sys.float_info.max, minv=-sys.float_info.max, clamp=False, allowNaN=True, **kwargs):
        """
        Makes a float given min and max values. The value can be set clamped to prevent failures

        minv    : the lowest allowed value - use 0 to allow only positive numbers
        maxv    : the highest value allowed
        clamp   : if True all values that can float() are accepted for updating, but are
                  restricted to be between minv and maxv
        allowNaN: if True, NaN is accepted and stored as-is; if False NaN is rejected
        """
        self.maxv=float(maxv)
        self.minv=float(minv)
        self.clamp=clamp==True
        self.allowNaN=allowNaN
        super().__init__(**kwargs)

    def validValue(self, value, agent):
        """
        value : the requested new value for the field, can be anything that float(x) can
                handle that is between minv and maxv - or if clamp is True, any value
        agent : who asked for the change (ignored here)
        returns : the valid new value (this is always a float)
        raises  : ValueError if the provided value is invalid
        """
        av=float(value)
        if math.isnan(av):
            # BUGFIX: NaN used to slip through the clamp branch even when
            # allowNaN was False, because NaN fails both < and > comparisons.
            if self.allowNaN:
                return av
            raise ValueError('NaN is not a valid value for this watchable')
        if self.clamp:
            return self.minv if av < self.minv else self.maxv if av > self.maxv else av
        if self.minv <= av <= self.maxv:
            return av
        raise ValueError('value {} is outside range {} to {}'.format(value, self.minv, self.maxv))
class intWatch(watchable):
    """
    A refinement of watchable that restricts the field value to integer numbers optionally within a range.
    """
    def __init__(self, maxv=None, minv=None, clamp=False, **kwargs):
        """
        creates an integer var

        maxv : None if unbounded maximum else anything that int() accepts
        minv : None if unbounded minimum else anything that int() accepts
        clamp: if True then value is clamped to maxv and minv (either can be None for
               unbounded in either 'direction')
        """
        self.maxv = None if maxv is None else int(maxv)
        self.minv = None if minv is None else int(minv)
        self.clamp = clamp == True
        super().__init__(**kwargs)

    def validValue(self, value, agent):
        """
        value : the requested new value, anything int() can handle that lies between
                minv and maxv - or, if clamp is True, any int()-able value
        agent : who asked for the change (ignored here)
        returns: the valid new value (always an int)
        raises : ValueError if the provided value is invalid
        """
        candidate = int(value)
        if self.clamp:
            if self.minv is not None and candidate < self.minv:
                candidate = self.minv
            elif self.maxv is not None and candidate > self.maxv:
                candidate = self.maxv
            return candidate
        below = self.minv is not None and candidate < self.minv
        above = self.maxv is not None and candidate > self.maxv
        if below or above:
            raise ValueError('value {} is outside range {} to {} for watchable'.format(value, self.minv, self.maxv))
        return candidate

    def increment(self, agent, count=1):
        """adds count to the current value via setValue and returns the requested (unclamped) value"""
        step = int(count)
        requested = self.getValue() + step
        self.setValue(requested, agent)
        return requested
class enumWatch(watchable):
    """
    a watchable that can only take a specific set of values, and can wrap / clamp values.
    It also allows values to be cycled through
    """
    def __init__(self, vlist, wrap=True, clamp=False, **kwargs):
        """
        vlist: sequence of the allowed values (order defines the cycling order)
        wrap : if True, incrementing past either end wraps to the other end
        clamp: if not wrapping, True pins out-of-range increments to the nearest end
        """
        self.wrap=wrap == True
        self.clamp=clamp == True
        self.vlist=vlist
        super().__init__(**kwargs)

    def validValue(self, value, agent):
        """returns value unchanged if it is a member of vlist, else raises ValueError"""
        if not value in self.vlist:
            raise ValueError('value (%s) not valid' % value)
        return value

    def getIndex(self):
        """returns the index of the current value within vlist"""
        return self.vlist.index(self._val)

    def increment(self, agent, inc=1):
        """
        moves inc places along vlist, wrapping / clamping as configured.

        returns setValue's result (True if the value changed) on every path
        raises ValueError when the move leaves the list and neither wrap nor clamp is set
        """
        newi=self.getIndex()+inc
        if 0 <= newi < len(self.vlist):
            return self.setValue(self.vlist[newi], agent)
        elif self.wrap:
            useval = self.vlist[-1] if newi < 0 else self.vlist[0]
        elif self.clamp:
            useval = self.vlist[0] if newi < 0 else self.vlist[-1]
        else:
            raise ValueError('operation exceeds list boundary')
        # BUGFIX: wrap / clamp updates previously returned None while in-range
        # updates returned setValue's bool - propagate the result consistently.
        return self.setValue(useval, agent)

    def setIndex(self, ival, agent):
        """sets the value to vlist[ival]; raises ValueError if ival is out of range"""
        if 0 <= ival < len(self.vlist):
            return self.setValue(self.vlist[ival], agent)
        else:
            raise ValueError('index out of range')
class btnWatch(watchable):
    """
    For simple click buttons that always notify
    """
    def setValue(self, value, agent):
        """
        A button press stores nothing new: loglvls / wflags updates are absorbed
        as in the base class, any other value simply fires the observers with
        the existing value.
        """
        for marker, attr in ((loglvls, 'loglevel'), (wflags, 'flags')):
            if isinstance(value, marker):
                setattr(self, attr, value)
                return False
        assert isinstance(agent, self.app.agentclass)
        self.notify(self._val, agent)
        return True
class folderWatch(watchable):
    """
    Internally, the value is a pathlib.Path to a folder (subfolders are created automatically).
    """
    def __init__(self, value, **kwargs):
        # validate (and create if needed) the folder before the base class stores it
        super().__init__(value=self.validValue(value, None), **kwargs)

    def validValue(self, value, agent):
        """
        expands and checks the candidate path, creating the folder if absent.

        returns the pathlib.Path
        raises ValueError if the path exists but is not a folder
        """
        tp=pathlib.Path(value).expanduser()
        if tp.exists():
            if tp.is_dir():
                return tp
            else:
                raise ValueError('%s is not a folder' % str(tp))
        else:
            tp.mkdir(parents=True, exist_ok=True)
            return tp

    def getValue(self):
        """returns the folder as a string (the external representation)"""
        return str(self._val)

    def getFolder(self):
        """returns the folder as a pathlib.Path"""
        return self._val

    def currentfilenames(self, includes=None, excludes=None):
        """
        returns names of files currently in this folder

        includes: if given, only names ending with one of these suffixes are kept
        excludes: if given, names ending with any of these suffixes are dropped
        """
        # BUGFIX: was self.getValue().iterdir() - getValue returns a str, which
        # has no iterdir(); use the pathlib.Path held internally.
        # BUGFIX: the exclude test previously kept a file if *any* exclude
        # suffix failed to match; a name must now match no exclude at all.
        names = []
        for pp in self.getFolder().iterdir():
            if not pp.is_file():
                continue
            if includes is not None and not any(pp.name.endswith(x) for x in includes):
                continue
            if excludes is not None and any(pp.name.endswith(x) for x in excludes):
                continue
            names.append(pp.name)
        return names
class watchablegroup(object):
def __init__(self, value, wabledefs, loglevel=None):
"""
value : dict of preferred values for watchables in this activity (e.g. from saved settings file)
| |
Linux user account.
Fields:
gecos: [Output Only] The GECOS (user information) entry for this account.
gid: [Output Only] User's default group ID.
homeDirectory: [Output Only] The path to the home directory for this
account.
shell: [Output Only] The path to the login shell for this account.
uid: [Output Only] User ID.
username: [Output Only] The username of the account.
"""
gecos = _messages.StringField(1)
gid = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
homeDirectory = _messages.StringField(3)
shell = _messages.StringField(4)
uid = _messages.IntegerField(5, variant=_messages.Variant.UINT32)
username = _messages.StringField(6)
class Operation(_messages.Message):
  """An Operation resource, used to manage asynchronous API requests.

  Enums:
    StatusValueValuesEnum: [Output Only] The status of the operation, which
      can be one of the following: PENDING, RUNNING, or DONE.

  Messages:
    ErrorValue: [Output Only] If errors are generated during processing of the
      operation, this field will be populated.
    WarningsValueListEntry: A WarningsValueListEntry object.

  Fields:
    clientOperationId: [Output Only] Reserved for future use.
    creationTimestamp: [Deprecated] This field is deprecated.
    description: [Output Only] A textual description of the operation, which
      is set when the operation is created.
    endTime: [Output Only] The time that this operation was completed. This
      value is in RFC3339 text format.
    error: [Output Only] If errors are generated during processing of the
      operation, this field will be populated.
    httpErrorMessage: [Output Only] If the operation fails, this field
      contains the HTTP error message that was returned, such as NOT FOUND.
    httpErrorStatusCode: [Output Only] If the operation fails, this field
      contains the HTTP error status code that was returned. For example, a
      404 means the resource was not found.
    id: [Output Only] The unique identifier for the resource. This identifier
      is defined by the server.
    insertTime: [Output Only] The time that this operation was requested. This
      value is in RFC3339 text format.
    kind: [Output Only] Type of the resource. Always compute#operation for
      Operation resources.
    name: [Output Only] Name of the resource.
    operationType: [Output Only] The type of operation, such as insert,
      update, or delete, and so on.
    progress: [Output Only] An optional progress indicator that ranges from 0
      to 100. There is no requirement that this be linear or support any
      granularity of operations. This should not be used to guess when the
      operation will be complete. This number should monotonically increase as
      the operation progresses.
    region: [Output Only] The URL of the region where the operation resides.
      Only available when performing regional operations.
    selfLink: [Output Only] Server-defined URL for the resource.
    startTime: [Output Only] The time that this operation was started by the
      server. This value is in RFC3339 text format.
    status: [Output Only] The status of the operation, which can be one of the
      following: PENDING, RUNNING, or DONE.
    statusMessage: [Output Only] An optional textual description of the
      current status of the operation.
    targetId: [Output Only] The unique target ID, which identifies a specific
      incarnation of the target resource.
    targetLink: [Output Only] The URL of the resource that the operation
      modifies. For operations related to creating a snapshot, this points to
      the persistent disk that the snapshot was created from.
    user: [Output Only] User who requested the operation, for example:
      <EMAIL>.
    warnings: [Output Only] If warning messages are generated during
      processing of the operation, this field will be populated.
    zone: [Output Only] The URL of the zone where the operation resides. Only
      available when performing per-zone operations.
  """
  # NOTE(review): generated protorpc message definitions - the integer field
  # numbers below are wire-format ordinals; never renumber or reorder them by
  # hand, regenerate instead.

  class StatusValueValuesEnum(_messages.Enum):
    """[Output Only] The status of the operation, which can be one of the
    following: PENDING, RUNNING, or DONE.

    Values:
      DONE: <no description>
      PENDING: <no description>
      RUNNING: <no description>
    """
    DONE = 0
    PENDING = 1
    RUNNING = 2

  class ErrorValue(_messages.Message):
    """[Output Only] If errors are generated during processing of the
    operation, this field will be populated.

    Messages:
      ErrorsValueListEntry: A ErrorsValueListEntry object.

    Fields:
      errors: [Output Only] The array of errors encountered while processing
        this operation.
    """

    class ErrorsValueListEntry(_messages.Message):
      """A ErrorsValueListEntry object.

      Fields:
        code: [Output Only] The error type identifier for this error.
        location: [Output Only] Indicates the field in the request that caused
          the error. This property is optional.
        message: [Output Only] An optional, human-readable error message.
      """
      code = _messages.StringField(1)
      location = _messages.StringField(2)
      message = _messages.StringField(3)

    errors = _messages.MessageField('ErrorsValueListEntry', 1, repeated=True)

  class WarningsValueListEntry(_messages.Message):
    """A WarningsValueListEntry object.

    Enums:
      CodeValueValuesEnum: [Output Only] A warning code, if applicable. For
        example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no
        results in the response.

    Messages:
      DataValueListEntry: A DataValueListEntry object.

    Fields:
      code: [Output Only] A warning code, if applicable. For example, Compute
        Engine returns NO_RESULTS_ON_PAGE if there are no results in the
        response.
      data: [Output Only] Metadata about this warning in key: value format.
        For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" }
      message: [Output Only] A human-readable description of the warning code.
    """

    class CodeValueValuesEnum(_messages.Enum):
      """[Output Only] A warning code, if applicable. For example, Compute
      Engine returns NO_RESULTS_ON_PAGE if there are no results in the
      response.

      Values:
        CLEANUP_FAILED: <no description>
        DEPRECATED_RESOURCE_USED: <no description>
        DISK_SIZE_LARGER_THAN_IMAGE_SIZE: <no description>
        FIELD_VALUE_OVERRIDEN: <no description>
        INJECTED_KERNELS_DEPRECATED: <no description>
        NEXT_HOP_ADDRESS_NOT_ASSIGNED: <no description>
        NEXT_HOP_CANNOT_IP_FORWARD: <no description>
        NEXT_HOP_INSTANCE_NOT_FOUND: <no description>
        NEXT_HOP_INSTANCE_NOT_ON_NETWORK: <no description>
        NEXT_HOP_NOT_RUNNING: <no description>
        NOT_CRITICAL_ERROR: <no description>
        NO_RESULTS_ON_PAGE: <no description>
        REQUIRED_TOS_AGREEMENT: <no description>
        RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING: <no description>
        RESOURCE_NOT_DELETED: <no description>
        SINGLE_INSTANCE_PROPERTY_TEMPLATE: <no description>
        UNREACHABLE: <no description>
      """
      CLEANUP_FAILED = 0
      DEPRECATED_RESOURCE_USED = 1
      DISK_SIZE_LARGER_THAN_IMAGE_SIZE = 2
      FIELD_VALUE_OVERRIDEN = 3
      INJECTED_KERNELS_DEPRECATED = 4
      NEXT_HOP_ADDRESS_NOT_ASSIGNED = 5
      NEXT_HOP_CANNOT_IP_FORWARD = 6
      NEXT_HOP_INSTANCE_NOT_FOUND = 7
      NEXT_HOP_INSTANCE_NOT_ON_NETWORK = 8
      NEXT_HOP_NOT_RUNNING = 9
      NOT_CRITICAL_ERROR = 10
      NO_RESULTS_ON_PAGE = 11
      REQUIRED_TOS_AGREEMENT = 12
      RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING = 13
      RESOURCE_NOT_DELETED = 14
      SINGLE_INSTANCE_PROPERTY_TEMPLATE = 15
      UNREACHABLE = 16

    class DataValueListEntry(_messages.Message):
      """A DataValueListEntry object.

      Fields:
        key: [Output Only] A key that provides more detail on the warning
          being returned. For example, for warnings where there are no results
          in a list request for a particular zone, this key might be scope and
          the key value might be the zone name. Other examples might be a key
          indicating a deprecated resource and a suggested replacement, or a
          warning about invalid network settings (for example, if an instance
          attempts to perform IP forwarding but is not enabled for IP
          forwarding).
        value: [Output Only] A warning data value corresponding to the key.
      """
      key = _messages.StringField(1)
      value = _messages.StringField(2)

    code = _messages.EnumField('CodeValueValuesEnum', 1)
    data = _messages.MessageField('DataValueListEntry', 2, repeated=True)
    message = _messages.StringField(3)

  clientOperationId = _messages.StringField(1)
  creationTimestamp = _messages.StringField(2)
  description = _messages.StringField(3)
  endTime = _messages.StringField(4)
  error = _messages.MessageField('ErrorValue', 5)
  httpErrorMessage = _messages.StringField(6)
  httpErrorStatusCode = _messages.IntegerField(7, variant=_messages.Variant.INT32)
  id = _messages.IntegerField(8, variant=_messages.Variant.UINT64)
  insertTime = _messages.StringField(9)
  kind = _messages.StringField(10, default=u'clouduseraccounts#operation')
  name = _messages.StringField(11)
  operationType = _messages.StringField(12)
  progress = _messages.IntegerField(13, variant=_messages.Variant.INT32)
  region = _messages.StringField(14)
  selfLink = _messages.StringField(15)
  startTime = _messages.StringField(16)
  status = _messages.EnumField('StatusValueValuesEnum', 17)
  statusMessage = _messages.StringField(18)
  targetId = _messages.IntegerField(19, variant=_messages.Variant.UINT64)
  targetLink = _messages.StringField(20)
  user = _messages.StringField(21)
  warnings = _messages.MessageField('WarningsValueListEntry', 22, repeated=True)
  zone = _messages.StringField(23)
class OperationList(_messages.Message):
  """Contains a list of Operation resources.

  Fields:
    id: [Output Only] The unique identifier for the resource. This identifier
      is defined by the server.
    items: [Output Only] A list of Operation resources.
    kind: [Output Only] Type of resource. Always compute#operations for
      Operations resource.
    nextPageToken: [Output Only] This token allows you to get the next page of
      results for list requests. If the number of results is larger than
      maxResults, use the nextPageToken as a value for the query parameter
      pageToken in the next list request. Subsequent list requests will have
      their own nextPageToken to continue paging through the results.
    selfLink: [Output Only] Server-defined URL for this resource.
  """
  # NOTE(review): generated message - field numbers are wire-format ordinals;
  # regenerate rather than hand-edit.

  id = _messages.StringField(1)
  items = _messages.MessageField('Operation', 2, repeated=True)
  kind = _messages.StringField(3, default=u'clouduseraccounts#operationList')
  nextPageToken = _messages.StringField(4)
  selfLink = _messages.StringField(5)
class PublicKey(_messages.Message):
"""A public key for authenticating to guests.
Fields:
creationTimestamp: [Output Only] Creation timestamp in RFC3339 text
format.
description: An optional textual description of the resource; provided by
the client when the resource is created.
expirationTimestamp: Optional expiration timestamp. If provided, the
timestamp must be in RFC3339 text format. If not provided, the public
key never expires.
fingerprint: [Output Only] The fingerprint of the key is defined by
RFC4716 to be the | |
address current challenges with industry, designed through academia, industry and students participation"
"Focuses on a wide variety of core courses, electives including cutting edge technologies like Artificial Intelligence, Big Data Analytics, Machine Learning, IoT and more."
"For detailed information about the curriculum please visit the following link!"
"<a href='https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-electronic-telecommunication'>syllabus</a>"
#"https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-electronic-telecommunication"
},
"placement": {
"The Placement Process at KJSCE acts as a link between the expectations of the Recruiters with the dreams of the students. "
"In year 2019-2020, we had 84 Companies visiting for recruitment."
"The highest package received was Rs.18.75 lpa and overall average of Rs.5.55 lpa with over 600+ successfull placements."
"For more information please visit the following link!"
"<a href='https://kjsce.somaiya.edu/en/placement/overview'>placement</a>"
#"https://kjsce.somaiya.edu/en/placement/overview"
},
"student" : {
"We at the K J Somaiya College of Engineering continuously strive to excel in academic along with various Co-curricular and extra-curricular activities. Extracurricular activities are not the components of the academic curriculum but an integral part of the educational environment for the intellectual development of an engineer. It will explore hidden talent inside you, it will make you more adaptive and flexible and help you to become a smart engineer not only just an engineering graduate."
"IETE - The Institute of Electronics & Telecommunication Engineers - Institute Students’ Forum, ( IETE – KJSISF ) , started in the year 2001."
"EESA - The Electronics Engineers Students’ Association was established in the year 1988. The association promotes activities such as paper presentations, quizzes, seminars, etc.It arranges for preparatory guidance lectures and also conducts mock tests and group discussions for CAT, GRE etc."
"For information regarding all the councils and student organisations please visit the following link:"
"<a href='https://kjsce.somaiya.edu/en/students-association'>student body</a>"
#"https://kjsce.somaiya.edu/en/students-association"
},
"programes" :{
"Electronics and Telecommunication Engineering department at K. J. Somaiya College of Engineering is committed to develop competent engineers ready to solve real world problems related to the field of electronics and communications. The department has always been on a high growth path and has experienced and dedicated faculty members with a strong devotion in the field of engineering education. The major areas of faculty expertise include Basic Electronics, Communication Systems, Computer Networks, Control Systems, Digital Signal Processing, Image Processing, Computer Vision, Instrumentation, Signal Processing, RF & Microwaves and VLSI Systems."
"For more information please visit the following link!"
"<a href='https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-electronic-telecommunication'>programes</a>"
#"https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-electronic-telecommunication"
},
"admissions" :{
"For admission , intrested student are required to give Somaiya Entrance Test-Engineering(SET-E)."
"We are delighted that you are considering an B.Tech programme at K J Somaiya College of Engineering. Please read all instructions and requirements carefully to ensure a complete and correct application on the provided link:"
#"https://kjsce.somaiya.edu/en/admission/btech"
"<a href='https://kjsce.somaiya.edu/en/admission/btech'>admission</a>"
"Feel free to contact us on <EMAIL> in case of any queries."
}
}
}
return EXTC
def add_ITdata():
    """Build and return the canned chatbot answers for the IT department.

    Returns a nested dict keyed 'it' -> topic.  NOTE(review): each topic
    value is a *set literal* whose single element is the implicit
    concatenation of the adjacent string literals (presumably meant as one
    multi-line answer) -- confirm the consumer expects a one-element set
    rather than a plain string before restructuring.
    """
    IT = {"it" : {
        "Faculty" : {
            "Dr. <NAME>AM is the Head of Department."
            "The IT Department has a total of 29 faculty members consisting of professors and assistant professors with high technical qualifications and industry required skills."
            "For more information please visit the given link!"
            "<a href='https://kjsce.somaiya.edu/en/contact-us/faculty-directory'>Faculty</a>"
            #"https://kjsce.somaiya.edu/en/contact-us/faculty-directory"
        },
        "infrastructure" :{
            "The department of Information Technology has sophisticated cutting edge laboratories with the latest infrastructure for parallel computing, cloud computing, Penetration Testing and Internet of Things provide quality academic experience for students."
            "For more information please visit the given link!"
            "<a href='https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-information-technology'>infrastructure</a>"
            #"https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-information-technology"
        },
        "syllabus" :{
            "Academia Curricula benchmarked with International Institutes of repute. Stream-based choices of electives in the curriculum help focused skill development and flexible credit courses for students opting higher studies abroad. Exposure to audit courses, Interdisciplinary courses, trending lab course makes a student ready for dynamically changing industry."
            "IT involves development of applications that churns and infers every data point in the diversified domain of Data Science, Artificial Intelligence, Cyber Security, Cloud Computing, Blockchain Technology, Application Development, IOT, etc. IT department is mandated towards focused delivery of the content by aligning curriculum and infrastructure to cater to emerging industry needs."
            "For more information please visit the given link!"
            "<a href='https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-information-technology'>syllabus</a>"
            #"https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-information-technology"
        },
        'placement' :{
            "The Placement Process at KJSCE acts as a link between the expectations of the Recruiters with the dreams of the students. "
            "In year 2019-2020, we had 84 Companies visiting for recruitment."
            "The highest package received was Rs.18.75 lpa and overall average of Rs.5.55 lpa with over 600+ successfull placements."
            "For more information please visit the following link!"
            "<a href='https://kjsce.somaiya.edu/en/placement/overview'>placement</a>"
            #"https://kjsce.somaiya.edu/en/placement/overview"
        },
        'student': {
            "We at the K J Somaiya College of Engineering continuously strive to excel in academic along with various Co-curricular and extra-curricular activities. Extracurricular activities are not the components of the academic curriculum but an integral part of the educational environment for the intellectual development of an engineer. It will explore hidden talent inside you, it will make you more adaptive and flexible and help you to become a smart engineer not only just an engineering graduate."
            "Codecell - KJSCE Codecell, a Codechef campus chapter was founded in 2014. The main goals of K J Somaiya College of Engineering Codecell are to promote a culture of competitive coding and to improve participation of the college in the prestigious competition ACM ICPC."
            "Bloombox - BloomBox is the entrepreneurship cell at K J Somaiya College of Engineering."
            "For more information you can visit the given link!"
            "<a href='https://kjsce.somaiya.edu/en/students-association'>student body</a>"
            #"https://kjsce.somaiya.edu/en/students-association"
        },
        'programes' :{
            "IT involves development of applications that churns and infers every data point in the diversified domain of Data Science, Artificial Intelligence, Cyber Security, Cloud Computing, Blockchain Technology, Application Development, IOT, etc."
            "IT department is mandated towards focused delivery of the content by aligning curriculum and infrastructure to cater to emerging industry needs."
            "Department aims at strengthening and preparing students for lifelong learning, research and successful adaptation of ever changing technology, which helps them to develop an ability to analyze, design and provide novel IT solutions for engineering problems."
            "For more information you can visit the given link!"
            "<a href='https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-information-technology'>programes</a>"
            #"https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-information-technology"
        },
        'admissions':{
            "For admission , intrested student are required to give Somaiya Entrance Test-Engineering(SET-E)."
            "We are delighted that you are considering an B.Tech programme at K J Somaiya College of Engineering. Please read all instructions and requirements carefully to ensure a complete and correct application on the provided link:"
            #"https://kjsce.somaiya.edu/en/admission/btech"
            "<a href='https://kjsce.somaiya.edu/en/admission/btech'>admissions</a>"
            "Feel free to contact us on <EMAIL> in case of any queries."
        }
    }
    }
    return IT
def add_mechdata():
Mech = {'mech':{
'Faculty' : {
"Dr. <NAME> is the Head of Department."
"The Mechanical Department has a total of 30 faculty members consisting of highly skilled professors and assistant professors with a good practical experience of the industry."
"For more information you can visit the given link!"
"<a href='https://kjsce.somaiya.edu/en/contact-us/faculty-directory'>Faculty</a>"
#"https://kjsce.somaiya.edu/en/contact-us/faculty-directory"
},
'infrastructure' : {
"The department of Mechanical Engineering has state of art laboratories and workshops where students can work on industry level projects with help of trained professoes and lab assistants."
"Special workshops are alloted to different teams like ROBOCON , Red shift Racing, Orion Racing where students can work after college hours on projects with full access to all the equipments."
#"https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-mechanical-engineering"
},
'syllabus' : {
"The syllabus includes multiple technical subjexts covering complete repertoire of skills necessary for overall development in field of Mechanical Engineering with workshops on emerging areas, industry based UG projects as per thrust areas ."
"Projects are converted as Lab experiments. Virtual Lab Experiments and variety of internal assessment techniques such as, Open book test, MCQ, Research Presentations, Case studies, Mini Projects and many more Funded projects."
"For more information please visit the following link!"
"<a href='https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-mechanical-engineering'></a>"
#"https://kjsce.somaiya.edu/en/programme/bachelor-of-technology-mechanical-engineering"
},
'placement' :{
"The Placement Process at KJSCE acts as a link between the expectations of the Recruiters with the dreams of the students. "
"In year 2019-2020, we had 84 Companies | |
import random, sys, time, os
import tkinter as Tk
import xlrd
from tkinter import Button, Frame, Label, LabelFrame, messagebox, PhotoImage, Tk, W, Entry
from random import seed, randint, shuffle
from termcolor import colored
seed(1)  # Fixed RNG seed so the shuffled question order is reproducible between runs.
class Bq:
    """Module-wide mutable game state, read and written by the GUI callbacks."""
    total_questions = 0       # number of questions loaded from the workbook
    your_score = 0            # running count of correct answers
    qcount = 1                # index into ques_list of the current question
    answer = ''               # text of the correct answer for the current question
    wrong = ''                # text of the player's last wrong choice
    choy = ''                 # shuffled list of the 4 answer choices being shown
    quest_frame = None        # Tk frame holding the question text
    qcount_frame = None       # Tk frame holding the question counter
    logo_frame = None         # Tk frame holding the logo image
    logo_lbl = None           # Tk label displaying the per-round logo
    ans_choices_frame = None  # Tk frame holding the 4 answer labels
    your_rating = 0           # game-over message derived from the final score
    pc = 0                    # final score as a percentage string, e.g. '75.0%'
    player_name = ''          # name entered at registration
    level_round_current = 0   # current difficulty round, 0-3
    index_question = 0        # number of the question currently being shown
    tmp_count = 0             # scan cursor into the shuffled index_list
    # following vars will eventually come from a config file.
    window_title = 'Trò chơi Phòng chống dịch Covid-19'
# Set up main window.
root = Tk()
root.title(Bq.window_title)
# Frame for logo display.
Bq.logo_frame = LabelFrame(root)
Bq.logo_frame.grid(row=0, column=0)
# Load in and display logo image.
Bq.logo_lbl = Label(Bq.logo_frame)
PHOTO = PhotoImage(file=f'covid-logo.png')
#PHOTO = PhotoImage(file=f'1_png.png')
Bq.logo_lbl.config(image=PHOTO)
Bq.logo_lbl.grid(row=0, column=0, padx=2, pady=2)
# Keep a reference on the label so the image is not garbage-collected.
Bq.logo_lbl.photo = PHOTO
# Frame for question counter.
Bq.qcount_frame = Frame(root)
Bq.qcount_frame.grid()
# Frame for printing questions.
Bq.quest_frame = Frame(root)
Bq.quest_frame.grid(row=2, column=0, padx=5, pady=8)
# Frame for printing the 4 poss answer choices in.
Bq.ans_choices_frame = Frame(root)
Bq.ans_choices_frame.grid(row=3, column=0, padx=5, pady=8)
# Frame for the answer buttons.
btns_frame = LabelFrame(root)
btns_frame.grid(padx=5, pady=8)
# Frame for score counter.
score_frame = Frame(root, pady=20)
score_frame.grid()
# Give the location of the file
Loc_of_QA = ("QnA.xls")
# To open Workbook
wb = xlrd.open_workbook(Loc_of_QA)
sheet = wb.sheet_by_index(0)
# NOTE(review): result of this probe read is unused.
sheet.cell_value(2, 0)
ques_list=[]
ans_list=[]
index_list=[]
difficult_level=[]
# Load every row: column 2 = question text, column 1 = difficulty tag,
# columns 3-6 = the four choices, column 7 = letter of the correct choice.
# ans_list stores 4 entries per question with the CORRECT answer first.
for i in range(sheet.nrows):
    index_list.append(i)
    ques_list.append(sheet.cell_value(i, 2))
    difficult_level.append(sheet.cell_value(i, 1))
    if sheet.cell_value(i, 7) == "A":
        ans_list.append(sheet.cell_value(i, 3))
        ans_list.append(sheet.cell_value(i, 4))
        ans_list.append(sheet.cell_value(i, 5))
        ans_list.append(sheet.cell_value(i, 6))
    elif sheet.cell_value(i, 7) == "B":
        ans_list.append(sheet.cell_value(i, 4))
        ans_list.append(sheet.cell_value(i, 3))
        ans_list.append(sheet.cell_value(i, 5))
        ans_list.append(sheet.cell_value(i, 6))
    elif sheet.cell_value(i, 7) == "C":
        ans_list.append(sheet.cell_value(i, 5))
        ans_list.append(sheet.cell_value(i, 3))
        ans_list.append(sheet.cell_value(i, 4))
        ans_list.append(sheet.cell_value(i, 6))
    elif sheet.cell_value(i, 7) == "D":
        ans_list.append(sheet.cell_value(i, 6))
        ans_list.append(sheet.cell_value(i, 3))
        ans_list.append(sheet.cell_value(i, 4))
        ans_list.append(sheet.cell_value(i, 5))
    else:
        # NOTE(review): rows with an unrecognised answer letter fall back to
        # the "D" ordering -- confirm this is intended.
        ans_list.append(sheet.cell_value(i, 6))
        ans_list.append(sheet.cell_value(i, 3))
        ans_list.append(sheet.cell_value(i, 4))
        ans_list.append(sheet.cell_value(i, 5))
shuffle(index_list)
Bq.total_questions = (len(ques_list))
def get_rating():
    """Compute the score percentage and pick the matching game-over message.

    Writes Bq.pc (percentage string) and Bq.your_rating (message text).
    """
    Bq.your_rating = 0
    pct = 100 * float(Bq.your_score) / 20
    Bq.pc = str(round(pct, 3)) + '%'
    name = str(Bq.player_name)
    if pct < 26:
        Bq.your_rating = 'Kêt quả không được tốt, ' + name + ' đã nghiêm túc thực hiện chưa?'
    elif pct < 51:
        Bq.your_rating = 'Kêt quả tạm chấp nhận được, ' + name + ' hãy cố gắng trong lần chơi tới nhé!'
    elif pct < 76:
        Bq.your_rating = 'Hi, kết quả tốt đó. Nhưng ' + name + 'cũng cần cải thiện thêm.'
    elif pct < 101:
        Bq.your_rating = 'Kêt quả quá tuyệt vời, ' + name + ' thực sự hiểu quá rõ về covid-19!'
def check_end_game():
    """After the 20th question, show the final score dialog and quit."""
    get_rating()
    if Bq.index_question != 20:
        return
    summary = ('Trò chơi kết thúc\n\n Kêt quả của ' + str(Bq.player_name)
               + ' là ' + str(Bq.your_score) + ' trên tổng số '
               + str(20) + '\n\n' + str(Bq.your_rating))
    messagebox.showinfo(Bq.window_title, summary)
    root.destroy()
    sys.exit()
def update_score():
    """Refresh the label that shows the player's current score."""
    caption = 'Điểm hiện tại của ' + str(Bq.player_name) + ' là: ' + str(Bq.your_score)
    lbl = Label(score_frame, bg='plum', font=('Arial', 14, 'bold'), text=caption)
    lbl.grid(row=0, column=0)
def correctly_answered():
    """Confirm a correct answer, then advance to the next question."""
    msg = (' Câu trả lời: "' + str(Bq.answer) + ' " là câu trả lời chính xác !\n\n'
           'Xin chúc mừng ' + str(Bq.player_name) + ', bạn đã có thêm điểm.')
    messagebox.showinfo(Bq.window_title, msg)
    Bq.tmp_count += 1  # Next question.
    check_end_game()
    display_quest_count()
    display_question()
    display_answer_choices()
def wrong_answer():
    """Tell the player the chosen answer was wrong, then advance."""
    msg = (str(Bq.wrong) + ' không chính xác !\n\n'
           + str(Bq.player_name) + ' không được nhận thêm điểm nào.')
    messagebox.showinfo(Bq.window_title, msg)
    Bq.tmp_count += 1
    check_end_game()
    display_quest_count()
    display_answer_choices() if False else None  # placeholder removed below
def display_quest_count():
    """Show the question number and the logo for the current round.

    FIX: the original repeated the text/image assignment four times and
    loaded up to four PhotoImages per call, each overriding the previous;
    pick the round once (highest threshold that matches) and load one image.
    Final widget state is identical.
    """
    Bq.qcount_frame.destroy()
    Bq.qcount_frame = Frame(root)
    Bq.qcount_frame.grid(row=1, column=0, padx=5, pady=8)
    # (threshold, title prefix, logo file), checked highest-first.
    rounds = (
        (15, 'Vòng 4:' + ' Vượt Qua Đại Dịch: ', '4_png.png'),
        (10, 'Vòng 3:' + ' Quyết Đấu: ', '3_png.png'),
        (5, 'Vòng 2:' + ' Bảo Vệ: ', '2_png.png'),
        (0, 'Vòng 1:' + ' Đương Đầu: ', '1_png.png'),
    )
    for threshold, prefix, logo_file in rounds:
        if Bq.index_question >= threshold:
            break
    txt = prefix + 'câu hỏi số ' + str(Bq.index_question + 1) + '/20 '
    PHOTO = PhotoImage(file=logo_file)
    Bq.logo_lbl.config(image=PHOTO)
    Bq.logo_lbl.grid(row=0, column=0, padx=2, pady=2)
    Bq.logo_lbl.photo = PHOTO  # keep a reference so Tk doesn't drop the image
    qcount_label = Label(Bq.qcount_frame, bg='skyblue', fg='white',
                         font=('Arial', 14, 'bold'),
                         text=txt)
    qcount_label.grid(row=1, column=0)
def display_question():
    """Advance to the next question of the current round and display it.

    FIX: the original duplicated the same scan loop four times (one per
    difficulty tag M1-M4); a single loop with the tag looked up from the
    current round is equivalent.
    """
    Bq.quest_frame.destroy()
    Bq.quest_frame = Frame(root)
    Bq.quest_frame.grid(row=2, column=0, padx=5, pady=8)
    Bq.index_question = Bq.index_question + 1
    # Questions 6, 11 and 16 open rounds 1, 2 and 3; the scan cursor is
    # reset so the new difficulty is searched from the start.
    for boundary, level in ((6, 1), (11, 2), (16, 3)):
        if Bq.index_question == boundary:
            Bq.level_round_current = level
            Bq.tmp_count = 1
    # Advance tmp_count until a question of this round's difficulty is found.
    target = ('M1', 'M2', 'M3', 'M4')[Bq.level_round_current]
    while difficult_level[index_list[Bq.tmp_count]] != target:
        Bq.tmp_count = Bq.tmp_count + 1
    Bq.qcount = index_list[Bq.tmp_count]
    quest_ion = ques_list[Bq.qcount]
    quest_label = Label(Bq.quest_frame, height=3,
                        fg='blue', wraplength=330, justify='left',
                        font=('Arial', 11, 'italic', 'bold'),
                        text='Câu hỏi: ' + quest_ion)
    quest_label.grid(row=0, column=0)
def display_answer_choices():
    """Show the four multiple-choice answers in a random order.

    ans_list stores four entries per question with the correct answer
    first; remember it in Bq.answer before shuffling the display order.
    FIX: the original copied the four answers into temporaries, built a
    tuple, and converted it to a list -- a slice does the same in one step,
    and the four near-identical Label blocks collapse into a loop.
    """
    base = Bq.qcount * 4
    Bq.ans_choices_frame.destroy()
    Bq.ans_choices_frame = Frame(root)
    Bq.ans_choices_frame.grid(row=3, column=0, padx=5, pady=8)
    # Slicing yields a fresh list, safe to shuffle in place.
    Bq.choy = ans_list[base:base + 4]
    # Mix up the sequence because the correct answer is stored first.
    random.shuffle(Bq.choy)
    for row, letter in enumerate('ABCD'):
        lbl = Label(Bq.ans_choices_frame, font=('Arial', 10, 'bold'),
                    text=letter + '. ' + Bq.choy[row])
        lbl.grid(row=row, column=0, sticky=W)
    Bq.answer = ans_list[base]
def clkd_but_a():
    """Answer button A was clicked."""
    choice = Bq.choy[0]
    if choice == Bq.answer:
        Bq.your_score += 1
        update_score()
        correctly_answered()
        return
    Bq.wrong = choice
    wrong_answer()
def clkd_but_b():
    """Answer button B was clicked."""
    choice = Bq.choy[1]
    if choice == Bq.answer:
        Bq.your_score += 1
        update_score()
        correctly_answered()
        return
    Bq.wrong = choice
    wrong_answer()
def clkd_but_c():
    """Answer button C was clicked."""
    choice = Bq.choy[2]
    if choice == Bq.answer:
        Bq.your_score += 1
        update_score()
        correctly_answered()
        return
    Bq.wrong = choice
    wrong_answer()
def clkd_but_d():
    """Answer button D was clicked."""
    choice = Bq.choy[3]
    if choice == Bq.answer:
        Bq.your_score += 1
        update_score()
        correctly_answered()
        return
    Bq.wrong = choice
    wrong_answer()
# GUI buttons A B C D.  Each button forwards to the matching clkd_but_* handler.
btn_a = Button(btns_frame, bg='gold',
               font=('Arial', 14, 'bold'), text=' A ',
               command=clkd_but_a)
btn_a.grid(row=5, column=0, pady=15, padx=15)
btn_b = Button(btns_frame, bg='red',
               font=('Arial', 14, 'bold'), text=' B ',
               command=clkd_but_b)
btn_b.grid(row=5, column=1, pady=15, padx=15)
btn_c = Button(btns_frame, bg='springgreen',
               font=('Arial', 14, 'bold'), text=' C ',
               command=clkd_but_c)
btn_c.grid(row=5, column=2, pady=15, padx=15)
btn_d = Button(btns_frame, bg='white',
               font=('Arial', 14, 'bold'), text=' D ',
               command=clkd_but_d)
btn_d.grid(row=5, column=3, pady=15, padx=15)
def Start_the_game(player_name_game):
    """Greet the player, validate the name, and launch the first question.

    :param player_name_game: the name entered on the registration screen.
    """
    Bq.player_name = player_name_game
    messagebox.showinfo(Bq.window_title, 'Xin chào ' + str(Bq.player_name) + ' tới Trò chơi Phòng chống dịch Covid-19 !')
    # BUG FIX: the original `while True` re-displayed the error dialog
    # forever when the name was empty.  Show it once and abort -- the
    # message itself tells the player to restart the game.
    if Bq.player_name == '':
        messagebox.showerror(title='Lỗi', message='Tên đăng nhập không hợp lệ! Vui lòng khởi động lại trò chơi')
        root.destroy()
        sys.exit()
    Bq.quest_frame.destroy()
    display_quest_count()
    display_question()
    display_answer_choices()
    update_score()
    # Rebuild the answer buttons in a fresh frame.
    btns_frame = LabelFrame(root)
    btns_frame.grid(padx=5, pady=8)
    button_specs = (('gold', ' A ', clkd_but_a),
                    ('red', ' B ', clkd_but_b),
                    ('springgreen', ' C ', clkd_but_c),
                    ('white', ' D ', clkd_but_d))
    for col, (colour, caption, handler) in enumerate(button_specs):
        btn = Button(btns_frame, bg=colour,
                     font=('Arial', 14, 'bold'), text=caption,
                     command=handler)
        btn.grid(row=5, column=col, pady=15, padx=15)
    # NOTE(review): the original destroyed this frame immediately after
    # building it, so these local buttons never show (the module-level ones
    # remain).  Preserved for identical behavior -- confirm intent.
    btns_frame.destroy()
Name_Filling = Label(Bq.quest_frame, text = " Xin | |
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2012 <NAME> <<EMAIL>>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
# TODO:
# - EDID < 1.3
# - add short annotations
# - Signal level standard field in basic display parameters block
# - Additional color point descriptors
# - Additional standard timing descriptors
# - Extensions
import sigrokdecode as srd
from common.srdhelper import SrdIntEnum
import os
# Decoder state machine states (see Decoder.decode()).
St = SrdIntEnum.from_str('St', 'OFFSET EXTENSIONS HEADER EDID')
# Fixed 8-byte pattern that opens every EDID structure.
EDID_HEADER = [0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00]
# Byte offsets of the sections inside the 128-byte EDID base block.
OFF_VENDOR = 8
OFF_VERSION = 18
OFF_BASIC = 20
OFF_CHROM = 25
OFF_EST_TIMING = 35
OFF_STD_TIMING = 38
OFF_DET_TIMING = 54
OFF_NUM_EXT = 126
OFF_CHECKSUM = 127
# Pre-EDID established timing modes
est_modes = [
    '720x400@70Hz',
    '720x400@88Hz',
    '640x480@60Hz',
    '640x480@67Hz',
    '640x480@72Hz',
    '640x480@75Hz',
    '800x600@56Hz',
    '800x600@60Hz',
    '800x600@72Hz',
    '800x600@75Hz',
    '832x624@75Hz',
    '1024x768@87Hz(i)',
    '1024x768@60Hz',
    '1024x768@70Hz',
    '1024x768@75Hz',
    '1280x1024@75Hz',
    '1152x870@75Hz',
]
# X:Y display aspect ratios, as used in standard timing modes
xy_ratio = [
    (16, 10),
    (4, 3),
    (5, 4),
    (16, 9),
]
# Annotation classes
ANN_FIELDS = 0
ANN_SECTIONS = 1
class Decoder(srd.Decoder):
    # libsigrokdecode protocol-decoder metadata.
    api_version = 3
    id = 'edid'
    name = 'EDID'
    longname = 'Extended Display Identification Data'
    desc = 'Data structure describing display device capabilities.'
    license = 'gplv3+'
    inputs = ['i2c']
    outputs = []
    tags = ['Display', 'Memory', 'PC']
    # Annotation class indices correspond to ANN_FIELDS / ANN_SECTIONS.
    annotations = (
        ('field', 'Field'),
        ('section', 'Section'),
    )
    annotation_rows = (
        ('fields', 'Fields', (0,)),
        ('sections', 'Sections', (1,)),
    )
    def __init__(self):
        """Create the decoder and reset all per-capture state."""
        self.reset()
    def reset(self):
        """Clear all decoding state so a new capture can be processed."""
        self.state = None
        # Received data items, used as an index into samplenum/data
        self.cnt = 0
        # Start/end sample numbers per data item
        self.sn = []
        # Received data
        self.cache = []
        # Random read offset
        self.offset = 0
        # Extensions: index of the extension block being read (0 = base EDID),
        # with per-extension sample-number and data caches.
        self.extension = 0
        self.ext_sn = [[]]
        self.ext_cache = [[]]
    def start(self):
        """Register the annotation output with the sigrok backend."""
        self.out_ann = self.register(srd.OUTPUT_ANN)
    def decode(self, ss, es, data):
        """Consume one I2C event and advance the EDID parsing state machine.

        ss/es are start/end sample numbers; data is an (command, byte) pair
        from the stacked I2C decoder.  Address 0x50 is the standard DDC/EDID
        slave address.  Bytes read are accumulated in self.cache (or the
        per-extension cache) and sections are annotated once their final
        byte arrives.
        """
        cmd, data = data
        # A write to 0x50 sets the random-read offset that follows.
        if cmd == 'ADDRESS WRITE' and data == 0x50:
            self.state = St.OFFSET
            self.ss = ss
            return
        # A read from 0x50 starts an EDID (or extension block) read-out.
        if cmd == 'ADDRESS READ' and data == 0x50:
            if self.extension > 0:
                self.state = St.EXTENSIONS
                s = str(self.extension)
                t = ["Extension: " + s, "X: " + s, s]
            else:
                self.state = St.HEADER
                t = ["EDID"]
            self.put(ss, es, self.out_ann, [ANN_SECTIONS, t])
            return
        # Offset byte: position within the 128-byte blocks; block index
        # selects base EDID (0) or an extension (>=1).
        if cmd == 'DATA WRITE' and self.state == St.OFFSET:
            self.offset = data
            self.extension = self.offset // 128
            self.cnt = self.offset % 128
            if self.extension > 0:
                ext = self.extension - 1
                l = len(self.ext_sn[ext])
                # Truncate or extend to self.cnt.
                self.sn = self.ext_sn[ext][0:self.cnt] + [0] * max(0, self.cnt - l)
                self.cache = self.ext_cache[ext][0:self.cnt] + [0] * max(0, self.cnt - l)
            else:
                l = len(self.sn)
                self.sn = self.sn[0:self.cnt] + [0] * max(0, self.cnt - l)
                self.cache = self.cache[0:self.cnt] + [0] * max(0, self.cnt - l)
            ss = self.ss if self.ss else ss
            s = str(data)
            t = ["Offset: " + s, "O: " + s, s]
            self.put(ss, es, self.out_ann, [ANN_SECTIONS, t])
            return
        # We only care about actual data bytes that are read (for now).
        if cmd != 'DATA READ':
            return
        self.cnt += 1
        if self.extension > 0:
            self.ext_sn[self.extension - 1].append([ss, es])
            self.ext_cache[self.extension - 1].append(data)
        else:
            self.sn.append([ss, es])
            self.cache.append(data)
        if self.state is None or self.state == St.HEADER:
            # Wait for the EDID header
            if self.cnt >= OFF_VENDOR:
                if self.cache[-8:] == EDID_HEADER:
                    # Throw away any garbage before the header
                    self.sn = self.sn[-8:]
                    self.cache = self.cache[-8:]
                    self.cnt = 8
                    self.state = St.EDID
                    self.put(self.sn[0][0], es, self.out_ann,
                             [ANN_SECTIONS, ['Header']])
                    self.put(self.sn[0][0], es, self.out_ann,
                             [ANN_FIELDS, ['Header pattern']])
        elif self.state == St.EDID:
            # Each branch fires when the last byte of a section has arrived
            # and decodes the section that just completed.
            if self.cnt == OFF_VERSION:
                self.decode_vid(-10)
                self.decode_pid(-8)
                self.decode_serial(-6)
                self.decode_mfrdate(-2)
                self.put(self.sn[OFF_VENDOR][0], es, self.out_ann,
                         [ANN_SECTIONS, ['Vendor/product']])
            elif self.cnt == OFF_BASIC:
                self.put(self.sn[OFF_VERSION][0], es, self.out_ann,
                         [ANN_SECTIONS, ['EDID Version']])
                self.put(self.sn[OFF_VERSION][0], self.sn[OFF_VERSION][1],
                         self.out_ann, [ANN_FIELDS,
                         ['Version %d' % self.cache[-2]]])
                self.put(self.sn[OFF_VERSION+1][0], self.sn[OFF_VERSION+1][1],
                         self.out_ann, [ANN_FIELDS,
                         ['Revision %d' % self.cache[-1]]])
            elif self.cnt == OFF_CHROM:
                self.put(self.sn[OFF_BASIC][0], es, self.out_ann,
                         [ANN_SECTIONS, ['Basic display']])
                self.decode_basicdisplay(-5)
            elif self.cnt == OFF_EST_TIMING:
                self.put(self.sn[OFF_CHROM][0], es, self.out_ann,
                         [ANN_SECTIONS, ['Color characteristics']])
                self.decode_chromaticity(-10)
            elif self.cnt == OFF_STD_TIMING:
                self.put(self.sn[OFF_EST_TIMING][0], es, self.out_ann,
                         [ANN_SECTIONS, ['Established timings']])
                self.decode_est_timing(-3)
            elif self.cnt == OFF_DET_TIMING:
                self.put(self.sn[OFF_STD_TIMING][0], es, self.out_ann,
                         [ANN_SECTIONS, ['Standard timings']])
                self.decode_std_timing(self.cnt - 16)
            elif self.cnt == OFF_NUM_EXT:
                self.decode_descriptors(-72)
            elif self.cnt == OFF_CHECKSUM:
                self.put(ss, es, self.out_ann,
                         [0, ['Extensions present: %d' % self.cache[self.cnt-1]]])
            elif self.cnt == OFF_CHECKSUM+1:
                # All 128 bytes must sum to 0 modulo 256.
                checksum = 0
                for i in range(128):
                    checksum += self.cache[i]
                if checksum % 256 == 0:
                    csstr = 'OK'
                else:
                    csstr = 'WRONG!'
                self.put(ss, es, self.out_ann, [0, ['Checksum: %d (%s)' % (
                    self.cache[self.cnt-1], csstr)]])
                self.state = St.EXTENSIONS
        elif self.state == St.EXTENSIONS:
            # CEA-861 extension block: tag, version, DTD offset, support
            # flags, data block collection, DTDs, padding, checksum.
            cache = self.ext_cache[self.extension - 1]
            sn = self.ext_sn[self.extension - 1]
            v = cache[self.cnt - 1]
            if self.cnt == 1:
                if v == 2:
                    self.put(ss, es, self.out_ann, [1, ['Extensions Tag', 'Tag']])
                else:
                    self.put(ss, es, self.out_ann, [1, ['Bad Tag']])
            elif self.cnt == 2:
                self.put(ss, es, self.out_ann, [1, ['Version']])
                self.put(ss, es, self.out_ann, [0, [str(v)]])
            elif self.cnt == 3:
                self.put(ss, es, self.out_ann, [1, ['DTD offset']])
                self.put(ss, es, self.out_ann, [0, [str(v)]])
            elif self.cnt == 4:
                self.put(ss, es, self.out_ann, [1, ['Format support | DTD count']])
                support = "Underscan: {0}, {1} Audio, YCbCr: {2}".format(
                    "yes" if v & 0x80 else "no",
                    "Basic" if v & 0x40 else "No",
                    ["None", "422", "444", "422+444"][(v & 0x30) >> 4])
                self.put(ss, es, self.out_ann, [0, ['{0}, DTDs: {1}'.format(support, v & 0xf)]])
            elif self.cnt <= cache[2]:
                if self.cnt == cache[2]:
                    self.put(sn[4][0], es, self.out_ann, [1, ['Data block collection']])
                    self.decode_data_block_collection(cache[4:], sn[4:])
            elif (self.cnt - cache[2]) % 18 == 0:
                # NOTE(review): true division; the <= comparison below still
                # works on the float, but // would be clearer.
                n = (self.cnt - cache[2]) / 18
                if n <= cache[3] & 0xf:
                    self.put(sn[self.cnt - 18][0], es, self.out_ann, [1, ['DTD']])
                    self.decode_descriptors(-18)
            elif self.cnt == 127:
                dtd_last = cache[2] + (cache[3] & 0xf) * 18
                self.put(sn[dtd_last][0], es, self.out_ann, [1, ['Padding']])
            elif self.cnt == 128:
                checksum = sum(cache) % 256
                self.put(ss, es, self.out_ann, [0, ['Checksum: %d (%s)' % (
                    cache[self.cnt-1], 'Wrong' if checksum else 'OK')]])
def ann_field(self, start, end, annotation):
annotation = annotation if isinstance(annotation, list) else [annotation]
sn = self.ext_sn[self.extension - 1] if self.extension else self.sn
self.put(sn[start][0], sn[end][1],
self.out_ann, [ANN_FIELDS, annotation])
def lookup_pnpid(self, pnpid):
pnpid_file = os.path.join(os.path.dirname(__file__), 'pnpids.txt')
if os.path.exists(pnpid_file):
for line in open(pnpid_file).readlines():
if line.find(pnpid + ';') == 0:
return line[4:].strip()
return ''
def decode_vid(self, offset):
pnpid = chr(64 + ((self.cache[offset] & 0x7c) >> 2))
pnpid += chr(64 + (((self.cache[offset] & 0x03) << 3)
| ((self.cache[offset+1] & 0xe0) >> 5)))
pnpid += chr(64 + (self.cache[offset+1] & 0x1f))
vendor = self.lookup_pnpid(pnpid)
if vendor:
pnpid += ' (%s)' % vendor
self.ann_field(offset, offset+1, pnpid)
def decode_pid(self, offset):
pidstr = 'Product 0x%.2x%.2x' % (self.cache[offset+1], self.cache[offset])
self.ann_field(offset, offset+1, pidstr)
def decode_serial(self, offset):
serialnum = (self.cache[offset+3] << 24) \
+ (self.cache[offset+2] << 16) \
+ (self.cache[offset+1] << 8) \
+ self.cache[offset]
serialstr = ''
is_alnum = True
for i in range(4):
if not chr(self.cache[offset+3-i]).isalnum():
is_alnum = False
break
serialstr += chr(self.cache[offset+3-i])
serial = serialstr if is_alnum else str(serialnum)
self.ann_field(offset, offset+3, 'Serial ' + serial)
def decode_mfrdate(self, offset):
datestr = ''
if self.cache[offset]:
datestr += 'week %d, ' % self.cache[offset]
datestr += str(1990 + self.cache[offset+1])
if datestr:
self.ann_field(offset, offset+1, ['Manufactured ' + datestr, datestr])
def decode_basicdisplay(self, offset):
# Video input definition
vid = self.cache[offset]
if vid & 0x80:
# Digital
self.ann_field(offset, offset, 'Video input: VESA DFP 1.')
else:
# Analog
sls = (vid & 60) >> 5
self.ann_field(offset, offset, 'Signal level standard: %.2x' % sls)
if vid & 0x10:
self.ann_field(offset, offset, 'Blank-to-black setup expected')
syncs = ''
if vid & 0x08:
syncs += 'separate syncs, '
if vid & 0x04:
syncs += 'composite syncs, '
if vid & 0x02:
syncs += 'sync on green, '
if vid & 0x01:
syncs += 'Vsync serration required, '
if syncs:
self.ann_field(offset, offset, 'Supported syncs: %s' % syncs[:-2])
# Max horizontal/vertical image size
if self.cache[offset+1] != 0 | |
state = self.__dict__.copy() # copy the objects state
# Remove unpicklable entries (those which are lazily loaded
del state['rays_per_sweep']
del state['gate_x']
del state['gate_y']
del state['gate_z']
del state['gate_longitude']
del state['gate_latitude']
del state['gate_altitude']
return state
def __setstate__(self, state):
""" Restore unpicklable entries from pickled object. """
self.__dict__.update(state)
self.init_rays_per_sweep()
self.init_gate_x_y_z()
self.init_gate_longitude_latitude()
self.init_gate_altitude()
# Attribute init/reset method
def init_rays_per_sweep(self):
""" Initialize or reset the rays_per_sweep attribute. """
lazydic = LazyLoadDict(get_metadata('rays_per_sweep'))
lazydic.set_lazy('data', _rays_per_sweep_data_factory(self))
self.rays_per_sweep = lazydic
def init_gate_x_y_z(self):
""" Initialize or reset the gate_{x, y, z} attributes. """
gate_x = LazyLoadDict(get_metadata('gate_x'))
gate_x.set_lazy('data', _gate_data_factory(self, 0))
self.gate_x = gate_x
gate_y = LazyLoadDict(get_metadata('gate_y'))
gate_y.set_lazy('data', _gate_data_factory(self, 1))
self.gate_y = gate_y
gate_z = LazyLoadDict(get_metadata('gate_z'))
gate_z.set_lazy('data', _gate_data_factory(self, 2))
self.gate_z = gate_z
def init_gate_longitude_latitude(self):
"""
Initialize or reset the gate_longitude and gate_latitude attributes.
"""
gate_longitude = LazyLoadDict(get_metadata('gate_longitude'))
gate_longitude.set_lazy('data', _gate_lon_lat_data_factory(self, 0))
self.gate_longitude = gate_longitude
gate_latitude = LazyLoadDict(get_metadata('gate_latitude'))
gate_latitude.set_lazy('data', _gate_lon_lat_data_factory(self, 1))
self.gate_latitude = gate_latitude
def init_gate_altitude(self):
""" Initialize the gate_altitude attribute. """
gate_altitude = LazyLoadDict(get_metadata('gate_altitude'))
gate_altitude.set_lazy('data', _gate_altitude_data_factory(self))
self.gate_altitude = gate_altitude
# private functions for checking limits, etc.
def _check_sweep_in_range(self, sweep):
""" Check that a sweep number is in range. """
if sweep < 0 or sweep >= self.nsweeps:
raise IndexError('Sweep out of range: ', sweep)
return
# public check functions
def check_field_exists(self, field_name):
"""
Check that a field exists in the fields dictionary.
If the field does not exist raise a KeyError.
Parameters
----------
field_name : str
Name of field to check.
"""
if field_name not in self.fields:
raise KeyError('Field not available: ' + field_name)
return
# Iterators
def iter_start(self):
""" Return an iterator over the sweep start indices. """
return (s for s in self.sweep_start_ray_index['data'])
def iter_end(self):
""" Return an iterator over the sweep end indices. """
return (s for s in self.sweep_end_ray_index['data'])
def iter_start_end(self):
""" Return an iterator over the sweep start and end indices. """
return ((s, e) for s, e in zip(self.iter_start(), self.iter_end()))
def iter_slice(self):
""" Return an iterator which returns sweep slice objects. """
return (slice(s, e+1) for s, e in self.iter_start_end())
def iter_field(self, field_name):
""" Return an iterator which returns sweep field data. """
self.check_field_exists(field_name)
return (self.fields[field_name]['data'][s] for s in self.iter_slice())
def iter_azimuth(self):
""" Return an iterator which returns sweep azimuth data. """
return (self.azimuth['data'][s] for s in self.iter_slice())
def iter_elevation(self):
    """ Return an iterator which returns sweep elevation data.

    Each item is the elevation data sliced to one sweep's rays.
    """
    return (self.elevation['data'][s] for s in self.iter_slice())
# get methods
def get_start(self, sweep):
    """ Return the starting ray index for a given sweep.

    Raises IndexError if sweep is out of range.
    """
    self._check_sweep_in_range(sweep)
    return self.sweep_start_ray_index['data'][sweep]
def get_end(self, sweep):
    """ Return the ending ray for a given sweep.

    The returned index is inclusive.  Raises IndexError if sweep is
    out of range.
    """
    self._check_sweep_in_range(sweep)
    return self.sweep_end_ray_index['data'][sweep]
def get_start_end(self, sweep):
    """ Return the starting and ending ray for a given sweep.

    Both indices are inclusive.
    """
    return self.get_start(sweep), self.get_end(sweep)
def get_slice(self, sweep):
    """ Return a slice for selecting rays for a given sweep.

    The stored end index is inclusive, so 1 is added to form the
    exclusive stop of the slice.
    """
    start, end = self.get_start_end(sweep)
    return slice(start, end+1)
def get_field(self, sweep, field_name, copy=False):
    """
    Return the field data for a given sweep.

    Combine with :py:func:`get_gate_x_y_z` to obtain everything needed
    to plot a radar field with the correct spatial context.

    Parameters
    ----------
    sweep : int
        Sweep number to retrieve data for, 0 based.
    field_name : str
        Name of the field from which data should be retrieved.
    copy : bool, optional
        True to return a copy of the data.  False, the default, returns
        a view of the data (when possible); mutating it will mutate the
        data in the underlying Radar object.

    Returns
    -------
    data : array
        Array containing data for the requested sweep and field.
    """
    self.check_field_exists(field_name)
    sweep_rays = self.get_slice(sweep)
    data = self.fields[field_name]['data'][sweep_rays]
    return data.copy() if copy else data
def get_azimuth(self, sweep, copy=False):
    """
    Return an array of azimuth angles for a given sweep.

    Parameters
    ----------
    sweep : int
        Sweep number to retrieve data for, 0 based.
    copy : bool, optional
        True to return a copy of the azimuths.  False, the default,
        returns a view (when possible); mutating it will mutate the
        data in the underlying Radar object.

    Returns
    -------
    azimuths : array
        Array containing the azimuth angles for the given sweep.
    """
    sweep_rays = self.get_slice(sweep)
    azimuths = self.azimuth['data'][sweep_rays]
    return azimuths.copy() if copy else azimuths
def get_elevation(self, sweep, copy=False):
    """
    Return an array of elevation angles for a given sweep.

    Parameters
    ----------
    sweep : int
        Sweep number to retrieve data for, 0 based.
    copy : bool, optional
        True to return a copy of the elevations.  False, the default,
        returns a view (when possible); mutating it will mutate the
        data in the underlying Radar object.

    Returns
    -------
    elevation : array
        Array containing the elevation angles for the given sweep.
    """
    # NOTE: the original docstring mislabeled the return as "azimuths".
    sweep_rays = self.get_slice(sweep)
    elevation = self.elevation['data'][sweep_rays]
    return elevation.copy() if copy else elevation
def get_gate_x_y_z(self, sweep, edges=False, filter_transitions=False):
    """
    Return the x, y and z gate locations in meters for a given sweep.

    With the default parameter this method returns the same data as
    contained in the gate_x, gate_y and gate_z attributes but this method
    performs the gate location calculations only for the specified sweep
    and therefore is more efficient than accessing this data through these
    attributes.

    When used with :py:func:`get_field` this method can be used to obtain
    the data needed for plotting a radar field with the correct spatial
    context.

    Parameters
    ----------
    sweep : int
        Sweep number to retrieve gate locations from, 0 based.
    edges : bool, optional
        True to return the locations of the gate edges calculated by
        interpolating between the range, azimuths and elevations.
        False (the default) will return the locations of the gate centers
        with no interpolation.
    filter_transitions : bool, optional
        True to remove rays where the antenna was in transition between
        sweeps. False will include these rays. No rays will be removed
        if the antenna_transition attribute is not available (set to None).

    Returns
    -------
    x, y, z : 2D array
        Array containing the x, y and z, distances from the radar in
        meters for the center (or edges) for all gates in the sweep.
    """
    azimuths = self.get_azimuth(sweep)
    elevations = self.get_elevation(sweep)
    if filter_transitions and self.antenna_transition is not None:
        # Keep only rays whose transition flag is 0 (antenna steady).
        sweep_slice = self.get_slice(sweep)
        valid = self.antenna_transition['data'][sweep_slice] == 0
        azimuths = azimuths[valid]
        elevations = elevations[valid]
    return antenna_vectors_to_cartesian(
        self.range['data'], azimuths, elevations, edges=edges)
def get_gate_lat_lon_alt(self, sweep, reset_gate_coords=False,
                         filter_transitions=False):
    """
    Return the longitude, latitude and altitude gate locations.
    Longitude and latitude are in degrees and altitude in meters.

    With the default parameter this method returns the same data as
    contained in the gate_latitude, gate_longitude and gate_altitude
    attributes but this method performs the gate location calculations
    only for the specified sweep and therefore is more efficient than
    accessing this data through these attributes.  If the coordinates
    have changed at all, please use the reset_gate_coords parameter.

    Parameters
    ----------
    sweep : int
        Sweep number to retrieve gate locations from, 0 based.
    reset_gate_coords : bool, optional
        Optional to reset the gate latitude, gate longitude and gate
        altitude attributes before using them in this function. This
        is useful when the geographic coordinates have changed and gate
        latitude, gate longitude and gate altitude need to be reset.
    filter_transitions : bool, optional
        True to remove rays where the antenna was in transition between
        sweeps. False will include these rays. No rays will be removed
        if the antenna_transition attribute is not available (set to None).

    Returns
    -------
    lat, lon, alt : 2D array
        Array containing the latitude, longitude and altitude,
        for all gates in the sweep.
    """
    s = self.get_slice(sweep)
    if reset_gate_coords:
        # Re-install the lazy coordinate dicts so stale cached values
        # are discarded and recomputed on next access.
        gate_latitude = LazyLoadDict(get_metadata('gate_latitude'))
        gate_latitude.set_lazy('data', _gate_lon_lat_data_factory(self, 1))
        self.gate_latitude = gate_latitude
        gate_longitude = LazyLoadDict(get_metadata('gate_longitude'))
        gate_longitude.set_lazy('data', _gate_lon_lat_data_factory(self, 0))
        self.gate_longitude = gate_longitude
        gate_altitude = LazyLoadDict(get_metadata('gate_altitude'))
        gate_altitude.set_lazy('data', _gate_altitude_data_factory(self))
        self.gate_altitude = gate_altitude
    lat = self.gate_latitude['data'][s]
    lon = self.gate_longitude['data'][s]
    alt = self.gate_altitude['data'][s]
    if filter_transitions and self.antenna_transition is not None:
        # Keep only rays whose transition flag is 0 (antenna steady).
        valid = self.antenna_transition['data'][s] == 0
        lat = lat[valid]
        lon = lon[valid]
        alt = alt[valid]
    return lat, lon, alt
def get_nyquist_vel(self, sweep, check_uniform=True):
"""
Return the Nyquist velocity in meters per second for a given sweep.
| |
# Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from datetime import datetime, timedelta
import importlib
import json
import logging
import os
import platform
import py_compile
import shutil
import site
import sys
import tempfile
import time
import unittest
import zipfile
import mock
from c7n.mu import (
custodian_archive,
generate_requirements,
LambdaFunction,
LambdaManager,
PolicyLambda,
PythonPackageArchive,
CloudWatchLogSubscription,
SNSSubscription,
SQSSubscription,
CloudWatchEventSource
)
from c7n.ufuncs import logsub
from .common import (
BaseTest, event_data, functional, Bag, ACCOUNT_ID)
from .data import helloworld
ROLE = "arn:aws:iam::644160558196:role/custodian-mu"
def test_generate_requirements():
    """generate_requirements lists boto3's pinned deps minus the ignores."""
    lines = generate_requirements(
        'boto3', ignore=('docutils', 's3transfer', 'six'))
    # Each line is of the form "name==version"; keep just the names.
    packages = [line.split('==')[0] for line in lines.split('\n')]
    assert set(packages) == set([
        'botocore', 'jmespath', 'urllib3', 'python-dateutil'])
class Publish(BaseTest):
    """Tests publishing Lambda functions built from in-memory archives.

    Uses replayed flight data, so no live AWS account is required.
    """

    def make_func(self, **kw):
        """Build a LambdaFunction with a trivial handler.

        Keyword arguments override the default function configuration.
        """
        func_data = dict(
            name="test-foo-bar",
            handler="index.handler",
            memory_size=128,
            timeout=3,
            role='custodian-mu',
            runtime="python2.7",
            description="test",
        )
        func_data.update(kw)
        archive = PythonPackageArchive()
        archive.add_contents(
            "index.py", """def handler(*a, **kw):\n print("Greetings, program!")"""
        )
        archive.close()
        self.addCleanup(archive.remove)
        return LambdaFunction(func_data, archive)

    def test_publishes_a_lambda(self):
        """Publishing a function reports the deployed code size."""
        session_factory = self.replay_flight_data("test_publishes_a_lambda")
        mgr = LambdaManager(session_factory)
        func = self.make_func()
        self.addCleanup(mgr.remove, func)
        result = mgr.publish(func)
        self.assertEqual(result["CodeSize"], 169)

    def test_publish_a_lambda_with_layer_and_concurrency(self):
        """Layers and concurrency are set on publish; republishing without
        concurrency removes the reservation but leaves layers unchanged."""
        factory = self.replay_flight_data('test_lambda_layer_concurrent_publish')
        mgr = LambdaManager(factory)
        layers = ['arn:aws:lambda:us-east-1:644160558196:layer:CustodianLayer:2']
        func = self.make_func(
            concurrency=5,
            layers=layers)
        self.addCleanup(mgr.remove, func)
        result = mgr.publish(func)
        self.assertEqual(result['Layers'][0]['Arn'], layers[0])
        state = mgr.get(func.name)
        self.assertEqual(state['Concurrency']['ReservedConcurrentExecutions'], 5)
        # Republish the same code without a concurrency reservation.
        func = self.make_func(layers=layers)
        output = self.capture_logging("custodian.serverless", level=logging.DEBUG)
        result = mgr.publish(func)
        self.assertEqual(result['Layers'][0]['Arn'], layers[0])
        lines = output.getvalue().strip().split("\n")
        self.assertFalse('Updating function: test-foo-bar config Layers' in lines)
        self.assertTrue('Removing function: test-foo-bar concurrency' in lines)

    def test_can_switch_runtimes(self):
        """Republishing with a different runtime updates the function config."""
        session_factory = self.replay_flight_data("test_can_switch_runtimes")
        func = self.make_func()
        mgr = LambdaManager(session_factory)
        self.addCleanup(mgr.remove, func)
        result = mgr.publish(func)
        self.assertEqual(result["Runtime"], "python2.7")
        func.func_data["runtime"] = "python3.6"
        result = mgr.publish(func)
        self.assertEqual(result["Runtime"], "python3.6")
class PolicyLambdaProvision(BaseTest):
role = "arn:aws:iam::644160558196:role/custodian-mu"
def assert_items(self, result, expected):
    """Assert every key/value pair in expected appears in result."""
    for key, value in expected.items():
        self.assertEqual(value, result[key])
def test_config_rule_provision(self):
    """A config-rule mode policy publishes a custodian-prefixed lambda."""
    session_factory = self.replay_flight_data("test_config_rule")
    p = self.load_policy(
        {
            "resource": "security-group",
            "name": "sg-modified",
            "mode": {"type": "config-rule"},
        },
        session_factory=session_factory
    )
    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    result = mgr.publish(pl, "Dev", role=ROLE)
    self.assertEqual(result["FunctionName"], "custodian-sg-modified")
    self.addCleanup(mgr.remove, pl)
def test_config_rule_evaluation(self):
    """Config-rule execution mode resolves a config event to one resource."""
    session_factory = self.replay_flight_data("test_config_rule_evaluate")
    p = self.load_policy(
        {
            "resource": "ec2",
            "name": "ec2-modified",
            "mode": {"type": "config-rule"},
            "filters": [{"InstanceId": "i-094bc87c84d56c589"}],
        },
        session_factory=session_factory,
    )
    mode = p.get_execution_mode()
    event = event_data("event-config-rule-instance.json")
    resources = mode.run(event, None)
    self.assertEqual(len(resources), 1)
def test_phd_account_mode(self):
    """PHD mode on an account resource annotates the health event."""
    factory = self.replay_flight_data('test_phd_event_mode')
    p = self.load_policy(
        {'name': 'ec2-retire',
         'resource': 'account',
         'mode': {
             'categories': ['scheduledChange'],
             'events': ['AWS_EC2_PERSISTENT_INSTANCE_RETIREMENT_SCHEDULED'],
             'type': 'phd'}}, session_factory=factory)
    mode = p.get_execution_mode()
    event = event_data('event-phd-ec2-retire.json')
    resources = mode.run(event, None)
    self.assertEqual(len(resources), 1)
    # The matched account resource carries the health-event annotation.
    self.assertTrue('c7n:HealthEvent' in resources[0])
def test_phd_mode(self):
    """PHD mode on ec2 resolves resources and renders the event pattern."""
    factory = self.replay_flight_data('test_phd_event_mode')
    p = self.load_policy(
        {'name': 'ec2-retire',
         'resource': 'ec2',
         'mode': {
             'categories': ['scheduledChange'],
             'events': ['AWS_EC2_PERSISTENT_INSTANCE_RETIREMENT_SCHEDULED'],
             'type': 'phd'}}, session_factory=factory)
    mode = p.get_execution_mode()
    event = event_data('event-phd-ec2-retire.json')
    resources = mode.run(event, None)
    self.assertEqual(len(resources), 1)
    # The provisioned lambda's event source should target aws.health
    # with the configured category and event code.
    p_lambda = PolicyLambda(p)
    events = p_lambda.get_events(factory)
    self.assertEqual(
        json.loads(events[0].render_event_pattern()),
        {'detail': {
            'eventTypeCategory': ['scheduledChange'],
            'eventTypeCode': ['AWS_EC2_PERSISTENT_INSTANCE_RETIREMENT_SCHEDULED']},
         'source': ['aws.health']}
    )
def test_cwl_subscriber(self):
    """Publishing a log-subscription function wires a subscription filter."""
    # Shorten the IAM propagation wait; replayed data needs no real delay.
    self.patch(CloudWatchLogSubscription, "iam_delay", 0.01)
    session_factory = self.replay_flight_data("test_cwl_subscriber")
    session = session_factory()
    client = session.client("logs")
    lname = "custodian-test-log-sub"
    self.addCleanup(client.delete_log_group, logGroupName=lname)
    client.create_log_group(logGroupName=lname)
    linfo = client.describe_log_groups(logGroupNamePrefix=lname)["logGroups"][0]
    params = dict(
        session_factory=session_factory,
        name="c7n-log-sub",
        role=ROLE,
        sns_topic="arn:",
        log_groups=[linfo],
    )
    func = logsub.get_function(**params)
    manager = LambdaManager(session_factory)
    finfo = manager.publish(func)
    self.addCleanup(manager.remove, func)
    # The log group should now have exactly one subscription filter
    # pointed at the freshly published function.
    results = client.describe_subscription_filters(logGroupName=lname)
    self.assertEqual(len(results["subscriptionFilters"]), 1)
    self.assertEqual(
        results["subscriptionFilters"][0]["destinationArn"], finfo["FunctionArn"]
    )
    # try and update
    # params['sns_topic'] = "arn:123"
    # manager.publish(func)
@functional
def test_sqs_subscriber(self):
    """An SQS-subscribed function receives and logs a queued message."""
    session_factory = self.replay_flight_data('test_mu_sqs_subscriber')
    func_name = 'c7n-hello-sqs'
    queue_name = "my-dev-test-3"
    # Setup Queues
    session = session_factory()
    client = session.client('sqs')
    queue_url = client.create_queue(QueueName=queue_name).get('QueueUrl')
    queue_arn = client.get_queue_attributes(
        QueueUrl=queue_url,
        AttributeNames=['QueueArn'])['Attributes']['QueueArn']
    self.addCleanup(client.delete_queue, QueueUrl=queue_url)
    # Setup Function
    params = dict(
        session_factory=session_factory,
        name=func_name,
        role="arn:aws:iam::644160558196:role/custodian-mu",
        events=[SQSSubscription(session_factory, [queue_arn])])
    func = helloworld.get_function(**params)
    manager = LambdaManager(session_factory)
    manager.publish(func)
    self.addCleanup(manager.remove, func)
    # Send and Receive Check
    client.send_message(
        QueueUrl=queue_url, MessageBody=json.dumps({'jurassic': 'block'}))
    if self.recording:
        # Give the live subscription time to deliver before reading logs.
        time.sleep(60)
    log_events = list(manager.logs(func, "1970-1-1 UTC", "2037-1-1"))
    messages = [
        e["message"] for e in log_events if e["message"].startswith('{"Records')
    ]
    self.addCleanup(
        session.client("logs").delete_log_group,
        logGroupName="/aws/lambda/%s" % func_name)
    self.assertIn(
        'jurassic',
        json.loads(messages[0])["Records"][0]["body"])
@functional
def test_sns_subscriber_and_ipaddress(self):
    """An SNS-subscribed function receives and logs a published message."""
    # Shorten the IAM propagation wait; replayed data needs no real delay.
    self.patch(SNSSubscription, "iam_delay", 0.01)
    session_factory = self.replay_flight_data("test_sns_subscriber_and_ipaddress")
    session = session_factory()
    client = session.client("sns")
    # create an sns topic
    tname = "custodian-test-sns-sub"
    topic_arn = client.create_topic(Name=tname)["TopicArn"]
    self.addCleanup(client.delete_topic, TopicArn=topic_arn)
    # provision a lambda via mu
    params = dict(
        session_factory=session_factory,
        name="c7n-hello-world",
        role="arn:aws:iam::644160558196:role/custodian-mu",
        events=[SNSSubscription(session_factory, [topic_arn])],
    )
    func = helloworld.get_function(**params)
    manager = LambdaManager(session_factory)
    manager.publish(func)
    self.addCleanup(manager.remove, func)
    # now publish to the topic and look for lambda log output
    client.publish(TopicArn=topic_arn, Message="Greetings, program!")
    if self.recording:
        # Give the live subscription time to deliver before reading logs.
        time.sleep(30)
    log_events = manager.logs(func, "1970-1-1 UTC", "2037-1-1")
    messages = [
        e["message"] for e in log_events if e["message"].startswith('{"Records')
    ]
    self.addCleanup(
        session.client("logs").delete_log_group,
        logGroupName="/aws/lambda/c7n-hello-world",
    )
    self.assertEqual(
        json.loads(messages[0])["Records"][0]["Sns"]["Message"],
        "Greetings, program!",
    )
def test_cwe_update_config_and_code(self):
    """Republishing a changed policy updates both code and configuration.

    Originally this was testing the no update case.. but that is tricky
    to record; any updates to the code end up causing issues due to
    checksum mismatches which imply updating the function code / which
    invalidate the recorded data and the focus of the test.
    """
    session_factory = self.replay_flight_data("test_cwe_update", zdata=True)
    p = self.load_policy({
        "resource": "s3",
        "name": "s3-bucket-policy",
        "mode": {"type": "cloudtrail",
                 "events": ["CreateBucket"], 'runtime': 'python2.7'},
        "filters": [
            {"type": "missing-policy-statement",
             "statement_ids": ["RequireEncryptedPutObject"]},
        ],
        "actions": ["no-op"],
    })
    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    result = mgr.publish(pl, "Dev", role=ROLE)
    self.addCleanup(mgr.remove, pl)
    # Second revision: more memory and an extra cloudtrail event.
    p = self.load_policy(
        {
            "resource": "s3",
            "name": "s3-bucket-policy",
            "mode": {
                "type": "cloudtrail",
                "memory": 256,
                'runtime': 'python2.7',
                "events": [
                    "CreateBucket",
                    {
                        "event": "PutBucketPolicy",
                        "ids": "requestParameters.bucketName",
                        "source": "s3.amazonaws.com",
                    },
                ],
            },
            "filters": [
                {
                    "type": "missing-policy-statement",
                    "statement_ids": ["RequireEncryptedPutObject"],
                }
            ],
            "actions": ["no-op"],
        },
    )
    output = self.capture_logging("custodian.serverless", level=logging.DEBUG)
    result2 = mgr.publish(PolicyLambda(p), "Dev", role=ROLE)
    lines = output.getvalue().strip().split("\n")
    self.assertTrue("Updating function custodian-s3-bucket-policy code" in lines)
    self.assertTrue(
        "Updating function: custodian-s3-bucket-policy config MemorySize" in lines)
    self.assertEqual(result["FunctionName"], result2["FunctionName"])
    # drive by coverage
    functions = [
        i
        for i in mgr.list_functions()
        if i["FunctionName"] == "custodian-s3-bucket-policy"
    ]
    # BUG FIX: this was assertTrue(len(functions), 1), which never fails
    # because the second argument is the failure message; use assertEqual
    # so the count is actually checked.
    self.assertEqual(len(functions), 1)
    start = 0
    end = time.time() * 1000
    self.assertEqual(list(mgr.logs(pl, start, end)), [])
def test_cwe_trail(self):
    """A cloudtrail-mode policy renders its event pattern and config."""
    session_factory = self.replay_flight_data("test_cwe_trail", zdata=True)
    p = self.load_policy({
        "resource": "s3",
        "name": "s3-bucket-policy",
        "mode": {"type": "cloudtrail", "events": ["CreateBucket"]},
        "filters": [
            {
                "type": "missing-policy-statement",
                "statement_ids": ["RequireEncryptedPutObject"],
            }
        ],
        "actions": ["no-op"]},
        session_factory=session_factory)
    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    self.addCleanup(mgr.remove, pl)
    result = mgr.publish(pl, "Dev", role=ROLE)
    # One cloudwatch event source matching the CreateBucket API call.
    events = pl.get_events(session_factory)
    self.assertEqual(len(events), 1)
    event = events.pop()
    self.assertEqual(
        json.loads(event.render_event_pattern()),
        {
            u"detail": {
                u"eventName": [u"CreateBucket"],
                u"eventSource": [u"s3.amazonaws.com"],
            },
            u"detail-type": ["AWS API Call via CloudTrail"],
        },
    )
    self.assert_items(
        result,
        {
            "Description": "cloud-custodian lambda policy",
            "FunctionName": "custodian-s3-bucket-policy",
            "Handler": "custodian_policy.run",
            "MemorySize": 512,
            "Runtime": "python2.7",
            "Timeout": 60,
        },
    )
def test_mu_metrics(self):
    """Metrics for a just-defined policy lambda come back empty."""
    session_factory = self.replay_flight_data("test_mu_metrics")
    p = self.load_policy(
        {
            "name": "s3-bucket-policy",
            "resource": "s3",
            "mode": {"type": "cloudtrail", "events": ["CreateBucket"]},
            "actions": ["no-op"],
        }, session_factory=session_factory)
    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    end = datetime.utcnow()
    start = end - timedelta(1)
    results = mgr.metrics([pl], start, end, 3600)
    self.assertEqual(
        results,
        [{"Durations": [], "Errors": [], "Throttles": [], "Invocations": []}],
    )
def test_cwe_instance(self):
    """An ec2-instance-state policy provisions an enabled CWE rule."""
    session_factory = self.replay_flight_data("test_cwe_instance", zdata=True)
    p = self.load_policy({
        "resource": "s3",
        "name": "ec2-encrypted-vol",
        "mode": {"type": "ec2-instance-state", "events": ["pending"]}},
        session_factory=session_factory)
    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    self.addCleanup(mgr.remove, pl)
    result = mgr.publish(pl, "Dev", role=ROLE)
    self.assert_items(
        result,
        {
            "Description": "cloud-custodian lambda policy",
            "FunctionName": "custodian-ec2-encrypted-vol",
            "Handler": "custodian_policy.run",
            "MemorySize": 512,
            "Runtime": "python2.7",
            "Timeout": 60,
        },
    )
    # The provisioned events rule should match pending-state changes.
    events = session_factory().client("events")
    result = events.list_rules(NamePrefix="custodian-ec2-encrypted-vol")
    self.assert_items(
        result["Rules"][0],
        {"State": "ENABLED", "Name": "custodian-ec2-encrypted-vol"},
    )
    self.assertEqual(
        json.loads(result["Rules"][0]["EventPattern"]),
        {
            "source": ["aws.ec2"],
            "detail": {"state": ["pending"]},
            "detail-type": ["EC2 Instance State-change Notification"],
        },
    )
def test_cwe_asg_instance(self):
    """An asg-instance-state policy provisions an enabled CWE rule."""
    session_factory = self.replay_flight_data("test_cwe_asg", zdata=True)
    p = self.load_policy(
        {
            "resource": "asg",
            "name": "asg-spin-detector",
            "mode": {"type": "asg-instance-state", "events": ["launch-failure"]},
        }, session_factory=session_factory)
    pl = PolicyLambda(p)
    mgr = LambdaManager(session_factory)
    self.addCleanup(mgr.remove, pl)
    result = mgr.publish(pl, "Dev", role=ROLE)
    self.assert_items(
        result,
        {
            "FunctionName": "custodian-asg-spin-detector",
            "Handler": "custodian_policy.run",
            "MemorySize": 512,
            "Runtime": "python2.7",
            "Timeout": 60,
        },
    )
    # The provisioned events rule should match launch failures.
    events = session_factory().client("events")
    result = events.list_rules(NamePrefix="custodian-asg-spin-detector")
    self.assert_items(
        result["Rules"][0],
        {"State": "ENABLED", "Name": "custodian-asg-spin-detector"},
    )
    self.assertEqual(
        json.loads(result["Rules"][0]["EventPattern"]),
        {
            "source": ["aws.autoscaling"],
            "detail-type": ["EC2 Instance Launch Unsuccessful"],
        },
    )
def test_cwe_security_hub_action(self):
factory = self.replay_flight_data('test_mu_cwe_sechub_action')
p = self.load_policy({
'name': 'sechub',
'resource': 'account',
'mode': {
'type': 'hub-action'}},
session_factory=factory,
config={'account_id': ACCOUNT_ID})
mu_policy = PolicyLambda(p)
events = mu_policy.get_events(factory)
self.assertEqual(len(events), 1)
hub_action = events.pop()
self.assertEqual(
json.loads(hub_action.cwe.render_event_pattern()),
{'resources': [
'arn:aws:securityhub:us-east-1:644160558196:action/custom/sechub'],
'source': ['aws.securityhub'],
'detail-type': [
'Security Hub Findings - Custom Action', 'Security Hub Insight Results'
]})
hub_action.cwe = cwe = mock.Mock(CloudWatchEventSource)
cwe.get.return_value = False
cwe.update.return_value = True
cwe.add.return_value = True
self.assertEqual(repr(hub_action), "<SecurityHub Action sechub>")
self.assertEqual(
hub_action._get_arn(),
"arn:aws:securityhub:us-east-1:644160558196:action/custom/sechub")
self.assertEqual(
hub_action.get(mu_policy.name), {'event': False, 'action': None})
hub_action.add(mu_policy)
self.assertEqual(
| |
# coding=utf-8
"""
Testing qiki word.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import calendar
import inspect
import json
import re
import sys
import threading
import time
import unicodedata
import unittest
import uuid
import mysql.connector
import six
import qiki
from qiki.number import hex_from_bytes
from qiki.number import type_name
from qiki.word import is_iterable
from qiki.word import SubjectedVerb
try:
import secure.credentials
except ImportError:
secure = None
print("""
Example secure/credentials.py
for_unit_testing_database = dict(
language='MySQL',
host= 'localhost',
port= 8000,
user= 'example_user',
password='<PASSWORD>',
database='example_database',
table= 'word',
engine= 'MEMORY', # About 2x faster unit testing.
txt_type='VARCHAR(255)', # Because MEMORY doesn't support TEXT.
)
You also need to create an empty (0 bytes) file named secure/__init__.py
Why? See https://stackoverflow.com/questions/10863268/how-is-an-empty-init-py-file-correct
Short answer: that makes 'secure' into a package so we can import from it 'credentials.py'
which is a module.
In MySQL you will need to create the example_database and the example_user.
LexMySQL will create the table. Example MySQL commands to do all that:
CREATE DATABASE `example_database`;
CREATE USER 'example_user'@'localhost';
ALTER USER 'example_user'@'localhost'
IDENTIFIED BY '<PASSWORD>';
GRANT CREATE, INSERT, SELECT, DROP
ON `example_database`.*
TO 'example_user'@'localhost';
\n""", end="")
sys.exit(1)
# Module-level flags controlling database cleanup and test coverage.
LET_DATABASE_RECORDS_REMAIN = False
# NOTE: False = Each run deletes its table
# True = Each run leaves its table behind, for human examination
# If RANDOMIZE_DATABASE_TABLE = True
# then HUNDREDS of tables remain, after running all tests
# If RANDOMIZE_DATABASE_TABLE = False
# then only table 'word' remains -- and I GUESS it contains the results of
# whatever test HAPPENED to run last. So this is mainly useful when
# running individual tests.
# Either way, each test starts with an empty, virgin lex.
RANDOMIZE_DATABASE_TABLE = False
# NOTE: False = table name e.g. 'word'
# See secure.credentials.for_unit_testing_database.table for actual table name.
# True = this supports concurrent test runs (e.g. Word2.7 and Word3.9).
# Table name e.g. word_ce09954b2e784cd8811b640079497568
# CAUTION: if LET_DATABASE_RECORDS_REMAIN is also True, then
# HUNDREDS of tables will accumulate after each full test run.
TEST_ASTRAL_PLANE = True  # Test txt with Unicode characters on an astral-plane (beyond the base 64K)
SHOW_UTF8_EXAMPLES = False  # Prints a few unicode test strings in both \u escape syntax and UTF-8 hexadecimal.
# e.g. "\u262e on earth" in utf8 is E298AE206F6E206561727468
class TestFlavors(object):
    """Run each test derived from WordTests using the following variations."""
    SPECS = [
        # dict(name="LexMySQL/InnoDB", lex_class=qiki.LexMySQL, engine='InnoDB'),
        dict(name="LexMySQL/Memory", lex_class=qiki.LexMySQL, engine='MEMORY'),
        dict(name="LexInMemory", lex_class=qiki.LexInMemory),
    ]
    NON_CREDENTIAL_SPECS = 'name', 'lex_class'   # SPECS column names
    SQL_LEXES = [qiki.LexMySQL]                  # SPECS row subset (of lex_class column values)
    counts = {spec['name']: 0 for spec in SPECS}

    @classmethod
    def all_specs(cls):
        # Shallow copy so callers cannot mutate SPECS itself.
        return list(cls.SPECS)

    @classmethod
    def all_sql_specs(cls):
        return [spec for spec in cls.SPECS if spec['lex_class'] in cls.SQL_LEXES]

    @classmethod
    def credentials_from_specs(cls, spec):
        """
        Extract credential modifiers from specs.

        I know I know, this should be part of a "Spec" class, along with much else.
        """
        return {key: value for key, value in spec.items()
                if key not in cls.NON_CREDENTIAL_SPECS}

    @classmethod
    def count_test(cls, spec):
        cls.counts[spec['name']] += 1

    @classmethod
    def report(cls):
        return "\n".join(cls.report_lines())

    @classmethod
    def report_lines(cls):
        for spec in cls.SPECS:
            flavor_name = spec['name']
            yield "{count:5d} tests on {name}".format(
                name=flavor_name,
                count=cls.counts[flavor_name],
            )
class TestBaseClass(unittest.TestCase):
    """Base class for all qiki.Word tests."""
    # Intentionally empty: exists so all word tests share one root class.
# noinspection PyPep8Naming
def tearDownModule():
    """Print the per-flavor test counts after all tests in this module."""
    print(TestFlavors.report())  # Run once after all tests.
# mysql_client_version = subprocess.Popen(
# 'mysql --version',
# shell=True,
# stdout=subprocess.PIPE
# ).stdout.read().decode('unicode_escape').strip()
def version_report():
    """Print Python, connector, and (if reachable) MySQL server versions."""
    print("Python version", ".".join(str(x) for x in sys.version_info))
    # EXAMPLE: Python version 2.7.14.final.0
    # EXAMPLE: Python version 2.7.16.final.0
    # EXAMPLE: Python version 3.4.3.final.0
    # EXAMPLE: Python version 3.5.4.final.0
    # EXAMPLE: Python version 3.6.8.final.0
    # EXAMPLE: Python version 3.7.3.final.0
    # EXAMPLE: Python version 3.8.0.alpha.4
    print("MySQL Python Connector version", mysql.connector.version.VERSION_TEXT)
    # EXAMPLE: MySQL Python Connector version 2.0.3
    # EXAMPLE: MySQL Python Connector version 2.2.2b1
    # EXAMPLE: MySQL Python Connector version 8.0.16
    # print("MySQL Client version " + mysql_client_version + "\n", end="")
    # NOTE: Python 2 quirks in THIS print() call:
    # appends a \n to the subprocess output
    # ignores the end= parameter
    # never outputs a newline, unless it's explicit in the string
    # EXAMPLE: MySQL Client version mysql Ver 14.14 Distrib 5.7.24, for Win64 (x86_64)
    # NOTE: Maybe we don't care what mysql --version says.
    # Isn't the python connector the "real" "client" we're running here?
    # Connect briefly just to ask the server its version.
    credentials = secure.credentials.for_unit_testing_database.copy()
    try:
        lex = qiki.LexMySQL(**credentials)
    except qiki.LexMySQL.ConnectError:
        print("(cannot determine MySQL server version)")
    else:
        server_version = lex.server_version()
        # lex.uninstall_to_scratch()
        # TODO: Wasn't uninstall_to_scratch() rather brutish if all we did was server_version()?
        lex.disconnect()
        print("MySQL Server version", server_version)
        # EXAMPLE: MySQL Server version 5.7.24
version_report()
class SafeNameTests(TestBaseClass):
"""Test LexMySQL naming of tables and engines."""
# NOTE: For some reason, PyCharm got stupid about which secure.credentials were active when
# unit testing, while a project was loaded with another secure.credentials. Hence the
# following noinspection. The correct package imports when run however.
# noinspection PyUnresolvedReferences
def test_table_name_at_creation_good(self):
credentials = secure.credentials.for_unit_testing_database.copy()
def good_table_name(name):
credentials['table'] = name
lex = qiki.LexMySQL(**credentials)
lex.uninstall_to_scratch() # in case left over from a botched test
lex.install_from_scratch()
self.assertEqual('verb', lex['define'].obj.txt)
# NOTE: Former error about None having no txt attribute goes away by deleting the table.
# But it shouldn't happen now that we uninstall and install.
lex.uninstall_to_scratch()
lex.disconnect()
good_table_name('word_with_no_funny_business')
good_table_name('word')
good_table_name('w')
def test_table_name_at_creation_bad(self):
credentials = secure.credentials.for_unit_testing_database.copy()
def bad_table_name(name):
credentials['table'] = name
with self.assertRaises(qiki.LexMySQL.IllegalTableName):
qiki.LexMySQL(**credentials)
bad_table_name('')
bad_table_name('word_with_backtick_`_oops')
bad_table_name('word_with_single_quote_\'_oops')
bad_table_name('word_with_double_quote_\"_oops')
bad_table_name('word_ending_in_backslash_oops_\\')
bad_table_name('word with spaces oops')
def test_engine_name_good(self):
credentials = secure.credentials.for_unit_testing_database.copy()
# 2018.0712 - long hang on test 14 of 255.
# Restarting the MySQL server got it unstuck, but it just keeps
# happening now. So something is effed up.
# After implementing LexMemory and switching (clumsily) back and forth
# to LexMySQL.
# Then specifically was trying to get (groan) show_version to work.
# Oh! It was failure to lex.disconnect() from another test.
def good_engine_name(engine_name):
credentials['engine'] = engine_name
lex = qiki.LexMySQL(**credentials)
self.assertEqual('verb', lex['define'].obj.txt)
lex.uninstall_to_scratch()
lex.disconnect()
good_engine_name('MEMORY')
good_engine_name('InnoDB')
def test_engine_name_bad(self):
credentials = secure.credentials.for_unit_testing_database.copy()
def bad_engine_name(name):
credentials['table'] = 'word_for_engine_name_test'
credentials['engine'] = name
with self.assertRaises(qiki.LexMySQL.IllegalEngineName):
qiki.LexMySQL(**credentials)
# Why did this break once (a few times in a row) for 3 of these bad engine names?
# Seemed to fix itself after a few hours. Or switching python versions.
bad_engine_name('MEMORY_oops_\'')
bad_engine_name('MEMORY_oops_\"')
bad_engine_name('MEMORY_oops_`')
bad_engine_name('MEMORY_oops_\\')
bad_engine_name('MEMORY oops')
def test_engine_name_bad_explicit_install(self):
credentials = secure.credentials.for_unit_testing_database.copy()
def bad_engine_name(name):
credentials['table'] = 'word_for_engine_name_test_explicit_install'
lex = qiki.LexMySQL(**credentials)
lex.uninstall_to_scratch()
lex._engine = name
with self.assertRaises(qiki.LexMySQL.IllegalEngineName):
lex.install_from_scratch()
lex.disconnect()
bad_engine_name('MEMORY_backtick_`_oops')
bad_engine_name('MEMORY_quote_\'_oops')
bad_engine_name('MEMORY_quote_\"_oops')
def test_table_name_later_bad(self):
    """Corrupting _table after construction should raise IllegalTableName on use."""
    innocent_name = 'innocent_table_name_to_start_with'
    credentials = secure.credentials.for_unit_testing_database.copy()
    credentials['table'] = innocent_name
    lex = qiki.LexMySQL(**credentials)
    lex.uninstall_to_scratch()   # in case left over from a botched test
    lex.install_from_scratch()
    self.assertEqual('verb', lex['define'].obj.txt)
    # Sabotage the table name with quoting metacharacters.
    lex._table = 'evil_table_name_later_`\'_\"_oops_\\"'
    with self.assertRaises(qiki.LexMySQL.IllegalTableName):
        self.assertEqual('verb', lex['define'].obj.txt)
    # Restore the valid name so cleanup can proceed.
    lex._table = innocent_name
    lex.uninstall_to_scratch()
    lex.disconnect()
class LexErrorTests(TestBaseClass):
    """Exercise the common failure modes of instantiating a Lex."""

    def test_bad_password(self):
        """
        Example of the entire bad-password exception message:
        1045 (28000): Access denied for user 'unittest'@'localhost' (using password: YES)
        """
        credentials = secure.credentials.for_unit_testing_database.copy()
        credentials['password'] = '<PASSWORD>'
        # noinspection SpellCheckingInspection,SpellCheckingInspection
        with six.assertRaisesRegex(self, qiki.LexSentence.ConnectError, r'Access denied'):
            # EXAMPLE: 1045 (28000): Access denied for user 'unittest'@'localhost' (using password: YES)
            qiki.LexMySQL(**credentials)
        # TODO: Prevent ResourceWarning in Python 3.4 - 3.6
        # EXAMPLE: (appears every time running tests Word3.4, Word3.5, Word3.6)
        # ResourceWarning: unclosed <socket.socket fd=524, family=AddressFamily.AF_INET,
        # type=SocketKind.SOCK_STREAM, proto=6, laddr=('127.0.0.1', 59546),
        # raddr=('127.0.0.1', 3306)>
        # EXAMPLE: (intermittently appears below "OK" after all tests pass)
        # sys:1: ResourceWarning: unclosed file <_io.BufferedReader name=3>
        # EXAMPLE: (intermittently appears below "OK" after all tests pass)
        # C:\Python34\lib\importlib\_bootstrap.py:2150: ImportWarning: sys.meta_path is empty

    def test_two_lex(self):
        """Two simultaneous connections should see each other's definitions."""
        first_lex = qiki.LexMySQL(**secure.credentials.for_unit_testing_database)
        max_start = first_lex.max_idn()
        first_lex.define(first_lex.noun(), 'borg')
        self.assertEqual(max_start + 1, first_lex.max_idn())
        second_lex = qiki.LexMySQL(**secure.credentials.for_unit_testing_database)
        self.assertEqual(max_start + 1, second_lex.max_idn())
        # second_lex.uninstall_to_scratch()  # Why does this cause infinite hang?
        second_lex.disconnect()
        first_lex.uninstall_to_scratch()
        first_lex.disconnect()

    def test_connection_neglect(self):
        """Test automatic reconnection of the Lex."""
        lex = qiki.LexMySQL(**secure.credentials.for_unit_testing_database)
        self.assertEqual(1, lex.noun('noun').num)
        lex._simulate_connection_neglect()
        self.assertEqual(1, lex.noun('noun').num)
        lex.disconnect()
# noinspection PyUnresolvedReferences
class WordTests(TestBaseClass):
"""Base class for qiki.Word tests that use a standard, empty self.lex. Has no tests itself."""
first_setup = True
def __init__(self, *args, **kwargs):
super(WordTests, self).__init__(*args, **kwargs)
self.all_flavors()
self.flavor_spec = None
def all_flavors(self):
self.flavor_specs = TestFlavors.all_specs()
def only_sql_flavors(self):
self.flavor_specs = TestFlavors.all_sql_specs()
def run(self, result=None):
"""
Run the unit tests on each of the Lex classes.
If one version of the tests raises an exception, the other version may never be tested.
This may slightly confuse the test framework on the total number of tests being run.
"""
# | |
# Repository: atrettel/sheardata
#!/usr/bin/env python3
# Copyright (C) 2020-2021 <NAME>
#
# SPDX-License-Identifier: MIT

# Extraction script: loads the Huebscher (1947) duct-flow friction data into
# the shear-data SQLite database whose path is given on the command line.
import csv
import math
import sqlite3
import sheardata as sd
import sys

# First command-line argument is the path to the target database file.
conn = sqlite3.connect( sys.argv[1] )
cursor = conn.cursor()
# Enforce referential integrity for all inserts made by this script.
cursor.execute( "PRAGMA foreign_keys = ON;" )

# Identifiers for this study within the database's classification scheme.
flow_class = sd.FC_DUCT_FLOW
year = 1947
study_number = 1

study_id = sd.add_study(
    cursor,
    flow_class_id=flow_class,
    year=year,
    study_number=study_number,
    study_type_id=sd.ST_EXPERIMENT,
)

sd.add_study_source( cursor, study_id, "HuebscherRG+1947+eng+JOUR", sd.PRIMARY_SOURCE )

# Note attached later to mass-density values (see note_mass_density.tex).
mass_density_note = sd.add_note(
    cursor,
    "../data/{:s}/note_mass_density.tex".format( study_id ),
)

# Distance between pressure taps
#
# p. 129
#
# \begin{quote}
# Static pressure explorations made within the rectangular duct on a single
# plane perpendicular to the long axis of the duct disclosed no measureable
# variation over the cross-section.  A single internal static pressure taken at
# the axis of the duct at several locations along the duct length served to
# define the static friction pressure gradient.
#
# It was found that insertion of the tubes into the duct caused sufficient
# resistance to decrease the air flow in the round and square ducts.  Since
# this eeffect was negligible when insertion was less than 2 in. the tubes were
# never inserted for more than 2 in.  Openings for static tube insertion were
# located every 3 ft along the length of the duct.
# \end{quote}
#
# Note that this does not give any information about the distance that was used
# in the study, just that it was in 3 foot increments.
class Duct:
    """Geometry of one test duct: cross-sectional aspect ratio and length.

    Values are wrapped in sd.sdfloat — presumably a float carrying
    measurement uncertainty (TODO confirm against sheardata.sdfloat).
    """
    # Class-level defaults; always overwritten per instance in __init__.
    aspect_ratio = None
    length = None

    def __init__( self, aspect_ratio, length ):
        self.aspect_ratio = sd.sdfloat(aspect_ratio)
        self.length = sd.sdfloat(length)
# Read the per-duct global parameters: label, aspect ratio, length [ft].
# (Fix: removed a stray "\" line continuation that was redundant inside the
# parenthesized csv.reader(...) call.)
ducts = {}
globals_filename = "../data/{:s}/globals.csv".format( study_id )
with open( globals_filename, "r" ) as globals_file:
    globals_reader = csv.reader(
        globals_file,
        delimiter=",",
        quotechar='"',
        skipinitialspace=True,
    )
    next(globals_reader)  # skip the header row
    for globals_row in globals_reader:
        # Column 0: duct label; column 1: aspect ratio; column 2: length in
        # feet, converted here to meters.
        ducts[str(globals_row[0])] = Duct(
            float(globals_row[1]),
            float(globals_row[2]) * sd.METERS_PER_FOOT
        )
series_number = 0
for duct in ducts:
duct_globals_filename = "../data/{:s}/{:s}_duct_globals.csv".format(
study_id,
duct.lower(),
)
with open( duct_globals_filename, "r" ) as duct_globals_file:
duct_globals_reader = csv.reader(
duct_globals_file,
delimiter=",",
quotechar='"', \
skipinitialspace=True,
)
next(duct_globals_reader)
for duct_globals_row in duct_globals_reader:
series_number += 1
test_number = int(duct_globals_row[0])
originators_identifier = "{:s} duct {:d}".format(
duct,
test_number
)
temperature = sd.fahrenheit_to_kelvin( sd.sdfloat(duct_globals_row[2]) )
mass_density = sd.sdfloat(duct_globals_row[4]) * sd.KILOGRAM_PER_POUND_MASS / sd.METERS_PER_FOOT**3.0
bulk_velocity = sd.sdfloat(duct_globals_row[5]) * sd.METERS_PER_FOOT / sd.SECONDS_PER_MINUTE
hydraulic_diameter = sd.sdfloat(duct_globals_row[6]) * sd.METERS_PER_INCH
pressure_gradient = sd.sdfloat(duct_globals_row[7]) * sd.PASCALS_PER_INCH_OF_WATER / sd.METERS_PER_FOOT
Re_bulk_value = sd.sdfloat(duct_globals_row[10])
# Duct dimensions
#
# p. 128
#
# \begin{quote}
# The first part of the paper gives the results of an experimental
# investigation using three ducts of different forms but each of 8
# in. equivalent diameter. The duct sizes were 8 in. ID round, 8
# in. square and 4.5 in. by 36 in. rectangular (8:1 aspect ratio).
# Air velocities used ranged from 300 to 9310 fpm.
# \end{quote}
#
# However, the hydraulic diameter column in tables 2 to 4 makes it
# clear that these dimensions are only approximate. Indeed, the
# rectangular duct appears to vary in cross section between tests,
# while the round and square ducts have the same cross section.
#
# For the rectangular case, assume that the aspect ratio is
# constant.
height = None
width = None
half_height = None
if ( duct == "Square" ):
height = hydraulic_diameter
width = hydraulic_diameter
half_height = 0.5 * height
elif ( duct == "Rectangular" ):
height = 0.5 * ( 1.0 + ducts[duct].aspect_ratio ) * hydraulic_diameter / ducts[duct].aspect_ratio
width = ducts[duct].aspect_ratio * height
half_height = 0.5 * height
# Uncertainty of wall shear stress measurements
#
# p. 128
#
# \begin{quote}
# The estimated error in any flow measurement due to all sources,
# including the assumption of constant nozzle coefficient, did not
# exceed $\pm 2$ percent.
# \end{quote}
#
# p. 129
#
# \begin{quote}
# The maximum sensitivity of the five gages was $\pm 0.02$ in. of
# water, with an accuracy within this value over the entire range.
# \end{quote}
#
# The first number about the flow rate measurements appears
# reasonable, but the second number about the pressure drop
# measurements creates extremely large uncertainties for the lower
# bulk Reynolds number cases. It appears that this "maximum" is
# perhaps far too high.
wall_shear_stress = 0.25 * hydraulic_diameter * pressure_gradient
fanning_friction_factor = 2.0 * wall_shear_stress / ( mass_density * bulk_velocity**2.0 )
kinematic_viscosity = bulk_velocity * hydraulic_diameter / Re_bulk_value
dynamic_viscosity = mass_density * kinematic_viscosity
Re_bulk = bulk_velocity * hydraulic_diameter / kinematic_viscosity
friction_velocity = ( wall_shear_stress / mass_density )**0.5
viscous_length_scale = kinematic_viscosity / friction_velocity
Re_tau = None
if ( duct == "Round" ):
Re_tau = 0.5 * hydraulic_diameter / viscous_length_scale
else:
Re_tau = half_height / viscous_length_scale
speed_of_sound = sd.ideal_gas_speed_of_sound( temperature )
Ma_bulk = bulk_velocity / speed_of_sound
Ma_tau = friction_velocity / speed_of_sound
series_id = None
if ( duct == "Round" ):
series_id = sd.add_series(
cursor,
flow_class_id=flow_class,
year=year,
study_number=study_number,
series_number=series_number,
number_of_dimensions=2,
coordinate_system_id=sd.CS_CYLINDRICAL,
series_external_ids={ sd.C_SELF : originators_identifier },
)
sd.update_series_geometry(
cursor,
series_id,
sd.GM_ELLIPTICAL
)
else:
series_id = sd.add_series(
cursor,
flow_class_id=flow_class,
year=year,
study_number=study_number,
series_number=series_number,
number_of_dimensions=2,
coordinate_system_id=sd.CS_RECTANGULAR,
series_external_ids={ sd.C_SELF : originators_identifier },
)
sd.update_series_geometry(
cursor,
series_id,
sd.GM_RECTANGULAR
)
# TODO: set air as the working fluid.
station_number = 1
station_id = sd.add_station(
cursor,
flow_class_id=flow_class,
year=year,
study_number=study_number,
series_number=series_number,
station_number=station_number,
station_external_ids={ sd.C_SELF : originators_identifier },
)
sd.mark_station_as_periodic( cursor, station_id )
# p. 128
#
# \begin{quote}
# The mean air velocity was determined from the measurement of the
# air quantity and the duct area. \ldots Air quantity was
# measured by the use of five cast aluminum nozzles made
# approximately to ASME log-radius, low-ratio proportions and
# equiped with throat static taps. \ldots The nozzles were
# calibrated in place by impact tube traverses at the throat over
# the full flow range.
# \end{quote}
mt_bulk_velocity = sd.MT_IMPACT_TUBE
sd.set_station_value( cursor, station_id, sd.Q_HYDRAULIC_DIAMETER, hydraulic_diameter, )
sd.set_station_value( cursor, station_id, sd.Q_DEVELOPMENT_LENGTH, ducts[duct].length, )
sd.set_station_value( cursor, station_id, sd.Q_OUTER_LAYER_DEVELOPMENT_LENGTH, ducts[duct].length / hydraulic_diameter, )
sd.set_station_value( cursor, station_id, sd.Q_CROSS_SECTIONAL_ASPECT_RATIO, ducts[duct].aspect_ratio, )
sd.set_station_value( cursor, station_id, sd.Q_BULK_VELOCITY, bulk_velocity, value_type_id=sd.VT_BOTH_AVERAGES, meastech_ids=[mt_bulk_velocity], )
sd.set_station_value( cursor, station_id, sd.Q_BULK_REYNOLDS_NUMBER, Re_bulk, value_type_id=sd.VT_BOTH_AVERAGES, )
sd.set_station_value( cursor, station_id, sd.Q_BULK_MACH_NUMBER, Ma_bulk, value_type_id=sd.VT_BOTH_AVERAGES, )
if ( duct != "Round" ):
sd.set_station_value( cursor, station_id, sd.Q_CROSS_SECTIONAL_HEIGHT, height, )
sd.set_station_value( cursor, station_id, sd.Q_CROSS_SECTIONAL_WIDTH, width, )
sd.set_station_value( cursor, station_id, sd.Q_CROSS_SECTIONAL_HALF_HEIGHT, half_height, )
# This set of data only considers wall quantities.
point_number = 1
point_id = sd.add_point(
cursor,
flow_class_id=flow_class,
year=year,
study_number=study_number,
series_number=series_number,
station_number=station_number,
point_number=point_number,
point_label_id=sd.PL_WALL,
)
# TODO: Correct this assumption later.
#
# Duct material
#
# p. 128
#
# \begin{quote}
# The three ducts were fabricated from 16 gage galvanized sheet
# metal to provide the necessary rigidity against deflection.
# \end{quote}
#
# p. 129
#
# \begin{quote}
# The internal roughness of all three ducts was typical of
# galvanized iron, very little roughness was contributed by the
# joints. The hydraulic roughness magnitude cannot be measured
# geometrically but can be deduced from the test results.
# \end{quote}
for quantity_id in [ sd.Q_ROUGHNESS_HEIGHT,
sd.Q_INNER_LAYER_ROUGHNESS_HEIGHT,
sd.Q_OUTER_LAYER_ROUGHNESS_HEIGHT, ]:
sd.set_labeled_value(
cursor,
station_id,
quantity_id,
sd.PL_WALL,
sd.sdfloat(0.0),
meastech_ids=[sd.MT_ASSUMPTION],
)
current_notes = []
if ( test_number == 17 and duct == "Square" ):
current_notes = [mass_density_note]
# p. 129
mt_wall_shear_stress = sd.MT_MOMENTUM_BALANCE
sd.set_labeled_value( cursor, station_id, sd.Q_MASS_DENSITY, sd.PL_WALL, mass_density, value_type_id=sd.VT_BOTH_AVERAGES, note_ids=current_notes, )
sd.set_labeled_value( cursor, station_id, sd.Q_KINEMATIC_VISCOSITY, sd.PL_WALL, kinematic_viscosity, value_type_id=sd.VT_BOTH_AVERAGES, )
sd.set_labeled_value( cursor, station_id, sd.Q_DYNAMIC_VISCOSITY, sd.PL_WALL, dynamic_viscosity, value_type_id=sd.VT_BOTH_AVERAGES, )
sd.set_labeled_value( cursor, station_id, sd.Q_TEMPERATURE, sd.PL_WALL, temperature, value_type_id=sd.VT_BOTH_AVERAGES, )
sd.set_labeled_value( cursor, station_id, sd.Q_STREAMWISE_VELOCITY, sd.PL_WALL, sd.sdfloat( 0.0, 0.0 ), value_type_id=sd.VT_BOTH_AVERAGES, )
sd.set_labeled_value( cursor, station_id, sd.Q_DISTANCE_FROM_WALL, sd.PL_WALL, sd.sdfloat( 0.0, 0.0 ), value_type_id=sd.VT_BOTH_AVERAGES, )
sd.set_labeled_value( cursor, station_id, sd.Q_OUTER_LAYER_COORDINATE, sd.PL_WALL, sd.sdfloat( 0.0, 0.0 ), value_type_id=sd.VT_BOTH_AVERAGES, )
sd.set_labeled_value( cursor, station_id, sd.Q_SPEED_OF_SOUND, sd.PL_WALL, speed_of_sound, value_type_id=sd.VT_BOTH_AVERAGES, meastech_ids=[sd.MT_ASSUMPTION], )
sd.set_labeled_value( cursor, station_id, sd.Q_SHEAR_STRESS, sd.PL_WALL, wall_shear_stress, value_type_id=sd.VT_BOTH_AVERAGES, meastech_ids=[mt_wall_shear_stress], )
sd.set_labeled_value( cursor, station_id, sd.Q_FANNING_FRICTION_FACTOR, sd.PL_WALL, fanning_friction_factor, value_type_id=sd.VT_BOTH_AVERAGES, meastech_ids=[mt_wall_shear_stress], )
sd.set_labeled_value( cursor, station_id, sd.Q_FRICTION_VELOCITY, sd.PL_WALL, friction_velocity, value_type_id=sd.VT_BOTH_AVERAGES, )
sd.set_labeled_value( cursor, station_id, sd.Q_VISCOUS_LENGTH_SCALE, sd.PL_WALL, viscous_length_scale, value_type_id=sd.VT_BOTH_AVERAGES, )
sd.set_labeled_value( cursor, station_id, sd.Q_FRICTION_REYNOLDS_NUMBER, sd.PL_WALL, Re_tau, value_type_id=sd.VT_BOTH_AVERAGES, )
sd.set_labeled_value( cursor, station_id, sd.Q_SEMI_LOCAL_FRICTION_REYNOLDS_NUMBER, sd.PL_WALL, Re_tau, value_type_id=sd.VT_BOTH_AVERAGES, )
sd.set_labeled_value( cursor, station_id, sd.Q_FRICTION_MACH_NUMBER, sd.PL_WALL, Ma_tau, value_type_id=sd.VT_BOTH_AVERAGES, )
sd.set_labeled_value( cursor, station_id, sd.Q_HEAT_FLUX, sd.PL_WALL, sd.sdfloat( 0.0, 0.0 ), value_type_id=sd.VT_BOTH_AVERAGES, meastech_ids=[sd.MT_ASSUMPTION], )
sd.set_labeled_value( cursor, station_id, sd.Q_INNER_LAYER_HEAT_FLUX, sd.PL_WALL, sd.sdfloat( 0.0, 0.0 ), value_type_id=sd.VT_BOTH_AVERAGES, meastech_ids=[sd.MT_ASSUMPTION], )
sd.set_labeled_value( cursor, station_id, sd.Q_FRICTION_TEMPERATURE, sd.PL_WALL, sd.sdfloat( 0.0, 0.0 | |
== history = {}'.format(self._search_history[self._search_history_index]))
logger.debug(' == url = "{}"'.format(url))
logger.debug(' == headers = "{}"'.format(self._headers))
logger.debug(' == post_data = "{}"'.format(post_data))
''' keep server results here '''
new_raw_stations = []
try:
r = self._session.get(url=url, headers=self._headers, params=post_data, timeout=(self._search_timeout, 2 * self._search_timeout))
r.raise_for_status()
new_raw_stations = self._extract_data(json.loads(r.text))
# logger.error('DE \n\n{}'.format(new_raw_stations))
ret = True, len(new_raw_stations), go_back_in_history
except requests.exceptions.RequestException as e:
if logger.isEnabledFor(logging.ERROR):
logger.error(e)
self._raw_stations = []
ret = False, 0, go_back_in_history
''' use server result '''
if len(new_raw_stations) > 0:
self._raw_stations = new_raw_stations[:]
if self._search_return_function:
self._search_return_function(ret)
def _get_search_elements(self, a_search):
    '''
    Derive self.search_by and self.reverse from a search dict.

    "search_by" is the station field to sort on and "reverse" indicates
    descending order.  To be used with the sort function.
    '''
    logger.error('DE search in function is "{}"'.format(a_search))
    a_term = a_search['term']
    p_data = a_search['post_data']
    self.search_by = None
    self.reverse = False
    if p_data:
        # An explicit server-side ordering wins.
        if 'order' in p_data.keys():
            self.search_by = p_data['order']
        if 'reverse' in p_data:
            self.reverse = p_data['reverse'] == 'true'
    logger.error('DE search by was "{}"'.format(self.search_by))
    if self.search_by is None:
        # Otherwise infer the sort field from the search type.
        type_to_field = {
            'byname': 'name',
            'topvote': 'votes',
            'clickcount': 'clickcount',
            'bitrate': 'bitrate',
            'codec': 'codec',
            'country': 'country',
            'state': 'state',
            'language': 'language',
            'tags': 'tags',
        }
        a_type = a_search['type']
        self.search_by = type_to_field.get(a_type)
        if a_type == 'topvote':
            logger.error('DE search by is votes')
    if self.search_by is None:
        # Fall back to sorting by name when the post data mentions one.
        if p_data:
            if 'name' in p_data.keys():
                self.search_by = 'name'
                logger.error('DE search by is name (default)')
    if self.search_by is None:
        # Last resort: name.
        self.search_by = 'name'
        logger.error('DE search by is name (default)')
    logger.error('DE search by is "{}"'.format(self.search_by))
def get_next(self, search_term, start=0, stop=None):
    '''Return the index of the next station matching search_term.

    Scans forward from "start" to the end of the list, then wraps around
    to the top.  Returns None when the term is empty or never matches.
    '''
    if not search_term:
        return None
    # Forward from start to the end, then wrap from the top up to start.
    candidates = list(range(start, len(self._raw_stations))) + list(range(0, start))
    for idx in candidates:
        if self._search_in_station(search_term, idx):
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug('forward search term "{0}" found at {1}'.format(search_term, idx))
            return idx
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('forward search term "{}" not found'.format(search_term))
    return None
def get_previous(self, search_term, start=0, stop=None):
    '''Return the index of the previous station matching search_term.

    Scans backward from "start" to the top of the list, then wraps around
    to the bottom.  Returns None when the term is empty or never matches.
    '''
    if not search_term:
        return None
    # Backward from start to index 0, then wrap from the end down to start+1.
    candidates = list(range(start, -1, -1)) + list(range(len(self._raw_stations) - 1, start, -1))
    for idx in candidates:
        if self._search_in_station(search_term, idx):
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug('backward search term "{0}" found at {1}'.format(search_term, idx))
            return idx
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('backward search term "{}" not found'.format(search_term))
    return None
def _search_in_station(self, a_search_term, a_station):
guide = (
'name',
'country',
'codec',
'tags',
'bitrate',
'language'
)
for n in guide:
source = self._raw_stations[a_station][n]
if isinstance(source, int):
''' this is one of the numerical data '''
source = str(source)
if a_search_term.lower() in source.lower():
return True
return False
def _format_url(self, a_search):
if a_search['type'] in ('topvote',
'topclick',
'lastclick',
'lastchange',
'changed',
'improvable',
'broken',
):
url = 'http://{0}{1}'.format(
self._server,
'/json/stations/{}'.format(a_search['type'])
)
if a_search['term'] not in ('', '0'):
url += '/{}'.format(a_search['term'])
self._search_type = 0
elif a_search['type'] in ('byuuid',
'byname',
'bynameexact',
'bycodec',
'bycodecexact',
'bycountry',
'bycountryexact',
'bycountrycodeexact',
'bystate',
'bystateexact',
'bylanguage',
'bylanguageexact',
'bytag',
'bytagexact',
):
url = 'http://{0}{1}/{2}'.format(
self._server,
'/json/stations/{}'.format(a_search['type']),
a_search['term']
)
self._search_type = 1
elif a_search['type'] == 'search':
url = 'http://{0}{1}'.format(
self._server,
'/json/stations/search'
)
self._search_type = 2
return url
def format_empty_line(self, width):
    '''Return (-1, line) where line is a blank station row that still
    draws the column separators for the current output format.'''
    if self._output_format == 0:
        return -1, ' '
    # Columns drawn per output format level (index == self._output_format).
    layouts = (
        (),
        ('bitrate', ),
        ('votes', 'bitrate'),
        ('votes', 'clickcount', 'bitrate'),
        ('votes', 'clickcount', 'bitrate', 'country'),
        ('votes', 'clickcount', 'bitrate', 'country', 'language'),
        ('votes', 'clickcount', 'bitrate', 'country', 'state', 'language'),
        ('votes', 'clickcount', 'bitrate', 'codec', 'country', 'state', 'language', 'tags')
    )
    # One "│"-prefixed blank cell per info column.
    cells = [u'│' + ' ' * self._columns_width[name]
             for name in layouts[self._output_format]]
    info_part = ''.join(cells)
    name_part = ' ' * (width - len(info_part))
    if PY3:
        return -1, '{0}{1}'.format(name_part, info_part)
    else:
        return -1 , '{0}{1}'.format(
            name_part,
            info_part.encode('utf-8', 'replace')
        )
def format_station_line(self, id_in_list, pad, width):
    ''' Create a formated line for a station
    Parameters
    ----------
    id_in_list
        id in list of stations (0..len-1)
    pad
        length of NUMBER
    width
        final length of created string
    Returns
    -------
    A string of the following format:
        NUMBER. STATION NAME [INFO]
    where:
        NUMBER
            Right padded counter (id_in_list + 1)
        STATION NAME
            Left padded station name
        INFO
            Station info. Depending on window width, it can be:
                [Votes: XX, Clicks: XX, Bitrate: XXXkb, Country: XXXX],
                [Votes: XX, Clicks: XX, Bitrate: XXXkb],
                [XXXX v, XXXX, cl, XXXkb],
                [Bitrate: XXXkb], or
                empty string
    '''
    # Per-format info templates.  {0} is the "played" marker; the remaining
    # placeholders are the columns for that format level, separated by "│".
    info = (u'',
            u' {0}{1}kb',
            u' {0}{1}│{2}kb',
            u' {0}{1}│{2}│{3}kb',
            u' {0}{1}│{2}│{3}kb│{4}',
            u' {0}{1}│{2}│{3}kb│{4}│{5}',
            u' {0}{1}│{2}│{3}kb│{4}│{5}│{6}',
            u' {0}{1}│{2}│{3}kb│{4}│{5}│{6}│{7}│{8}',
            )
    # Recompute self._output_format from the current window width.
    self._get_output_format(width)
    # logger.error('DE self._output_format = {}'.format(self._output_format))
    out = ['{0}. '.format(str(id_in_list + 1).rjust(pad)), '', '']
    # format info field
    # "├" marks a station that has been played, "│" one that has not.
    pl = u'├' if self._raw_stations[id_in_list]['played'] else u'│'
    if self._output_format == 7:
        # full: votes, clicks, bitrate, codec, country, state, language, tags
        out[2] = ' ' + info[self._output_format].format(
            pl,
            str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
            str(self._raw_stations[id_in_list]['clickcount']).rjust(self._columns_width['clickcount'])[:self._columns_width['clickcount']],
            str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2],
            self._raw_stations[id_in_list]['codec'].rjust(self._columns_width['codec'])[:self._columns_width['codec']],
            self._raw_stations[id_in_list]['country'].ljust(self._columns_width['country'])[:self._columns_width['country']],
            self._raw_stations[id_in_list]['state'].ljust(self._columns_width['state'])[:self._columns_width['state']],
            self._raw_stations[id_in_list]['language'].ljust(self._columns_width['language'])[:self._columns_width['language']],
            self._raw_stations[id_in_list]['tags'].ljust(self._columns_width['tags'])[:self._columns_width['tags']]
        )
    if self._output_format == 6:
        # with state, but no codec/tags
        out[2] = ' ' + info[self._output_format].format(
            pl,
            str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
            str(self._raw_stations[id_in_list]['clickcount']).rjust(self._columns_width['clickcount'])[:self._columns_width['clickcount']],
            str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2],
            self._raw_stations[id_in_list]['country'].ljust(self._columns_width['country'])[:self._columns_width['country']],
            self._raw_stations[id_in_list]['state'].ljust(self._columns_width['state'])[:self._columns_width['state']],
            self._raw_stations[id_in_list]['language'].ljust(self._columns_width['language'])[:self._columns_width['language']]
        )
    if self._output_format == 5:
        # country and language, no state
        out[2] = ' ' + info[self._output_format].format(
            pl,
            str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
            str(self._raw_stations[id_in_list]['clickcount']).rjust(self._columns_width['clickcount'])[:self._columns_width['clickcount']],
            str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2],
            self._raw_stations[id_in_list]['country'].ljust(self._columns_width['country'])[:self._columns_width['country']],
            self._raw_stations[id_in_list]['language'].ljust(self._columns_width['language'])[:self._columns_width['language']]
        )
    if self._output_format == 4:
        # full or condensed info
        out[2] = ' ' + info[self._output_format].format(
            pl,
            str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
            str(self._raw_stations[id_in_list]['clickcount']).rjust(self._columns_width['clickcount'])[:self._columns_width['clickcount']],
            str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2],
            self._raw_stations[id_in_list]['country'].ljust(self._columns_width['country'])[:self._columns_width['country']]
        )
    elif self._output_format == 2:
        # votes and bitrate only
        out[2] = ' ' + info[self._output_format].format(
            pl,
            str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
            str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2]
        )
    elif self._output_format == 3:
        # votes, clicks and bitrate
        out[2] = ' ' + info[self._output_format].format(
            pl,
            str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
            str(self._raw_stations[id_in_list]['clickcount']).rjust(self._columns_width['clickcount'])[:self._columns_width['clickcount']],
            str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2]
        )
    elif self._output_format == 1:
        # Bitrate only
        out[2] = info[self._output_format].format(
            pl,
            str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2]
        )
    # Whatever width remains goes to the (CJK-width-aware) station name.
    name_width = width-len(out[0])-len(out[2])
    out[1] = self._fix_cjk_string_width(self._raw_stations[id_in_list]['name'].ljust(name_width)[:name_width], name_width)
    if PY3:
        # if pl == '╞':
        #     out[2] += '╡'
        return (self._raw_stations[id_in_list]['played'],
                '{0}{1}{2}'.format(*out))
    else:
        # on python 2, strings are already in utf-8
        return (self._raw_stations[id_in_list]['played'],
                '{0}{1}{2}'.format(
                    out[0].encode('utf-8', 'replace'),
                    out[1].encode('utf-8', 'replace'),
                    out[2].encode('utf-8', 'replace')))
def set_encoding(self, id_in_list, new_encoding):
    '''Assign a new character encoding to the station at id_in_list.
    Out-of-range indices are silently ignored.'''
    if id_in_list < len(self._raw_stations):
        station = self._raw_stations[id_in_list]
        station['encoding'] = new_encoding
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug('New encoding set to "{0}" for station "{1}"'.format(new_encoding, station['name']))
def _fix_cjk_string_width(self, a_string, width):
    '''Trim trailing characters until the CJK-aware display width
    (as reported by cjklen) fits within "width".'''
    trimmed = a_string
    while cjklen(trimmed) > width:
        trimmed = trimmed[:-1]
    return trimmed
def _extract_data(self, a_search_result):
    '''Convert a decoded radio-browser JSON result into the internal
    station-list format, resetting and updating self._max_len.

    Fix: the original assigned the "url" and "stationuuid" keys twice
    per station; the duplicates are removed.
    '''
    ret = []
    self._max_len = [0, 0]
    if a_search_result:
        for n in a_search_result:
            station = {
                # Commas would break the internal CSV-like handling.
                'name': n['name'].replace(',', ' '),
                'stationuuid': n['stationuuid'],
                'url': n['url'],
                'url_resolved': n['url_resolved'],
                'played': False,
                'hls': n['hls'],
                'countrycode': n['countrycode'],
                'country': n['country'],
                'codec': n['codec'],
                'state': n['state'],
                'tags': n['tags'].replace(',', ', '),
                'homepage': n['homepage'],
            }
            if isinstance(n['clickcount'], int):
                # old API: numeric fields already arrive as int
                station['votes'] = n['votes']
                station['clickcount'] = n['clickcount']
                station['bitrate'] = n['bitrate']
            else:
                # new API: numeric fields arrive as strings
                station['votes'] = int(n['votes'])
                station['clickcount'] = int(n['clickcount'])
                station['bitrate'] = int(n['bitrate'])
            station['language'] = capitalize_comma_separated_string(n['language'])
            station['encoding'] = ''
            ret.append(station)
            # Keep column widths up to date for display formatting.
            self._get_max_len(station['votes'], station['clickcount'])
    return ret
def _get_max_len(self, votes, clicks):
''' Calculate the maximum length of numeric_data / country
Parameters
----------
votes
Number of station's vote (string)
clicks
Number of station's clicks (string)
numeric_data
Returns
-------
self._max_len
A list [max votes length,
max clickcount length]
'''
numeric_data = (votes, clicks)
# logger.error('DE numeric_data = {}'.format(numeric_data))
min_data = (6, 7)
for i, x in enumerate(numeric_data):
n = str(x)
if len(n) > self._max_len[i]:
self._max_len[i] = len(n) if len(n) > min_data[i] else min_data[i]
def _get_output_format(self, width):
''' Return output format based on window width
Paramaters
----------
width
Window width
Returns
-------
self._output_format
A number 0..5
'''
# now_width = get_terminal_size().columns - 2
if width <= 50:
self._output_format = 0
elif width < 57:
self._output_format = 1
elif width < 65:
self._output_format = 2
elif width < 80:
self._output_format = 3
elif width < 95:
self._output_format = 4
elif width < 120:
self._output_format = 5
elif width < 145:
self._output_format = 6
else:
self._output_format = 7
def _populate_columns_separators(self, a_tuple, width):
ret = []
for i, n in enumerate(a_tuple):
if i == 0:
# logger.error('DE {0} - {1} = {2} - {3}'.format(width, self._columns_width[n], width-self._columns_width[n]-2, n))
ret.append(width - self._columns_width[n] - 2)
else:
# logger.error('{0} -1 - {1} = {2} - {3}'.format(ret[-1], self._columns_width[n], ret[-1] - 1 - self._columns_width[n], n))
ret.append(ret[-1] - 1 - self._columns_width[n])
ret.reverse()
# logger.error('DE \n\nret = | |
# File: tidepool_data_science_metrics/glucose/glucose.py
import numpy as np
from typing import Tuple
import warnings
import operator
import tidepool_data_science_metrics.common.common as common
# TODO: allow these functions to take in a mmol/L in addition to mg/dL
# TODO: allow these functions to operate on a matrix of glucose column arrays
def glucose_management_index(bg_array: "np.ndarray[np.float64]", round_to_n_digits: int = 3) -> np.float64:
    """
    Calculate the Glucose Management Indicator for a set of glucose values.

    GMI estimates the average A1C level that would be expected from the
    mean measured glucose.

    Reference - Glucose Management Indicator (GMI) - https://www.jaeb.org/gmi/

    Parameters
    ----------
    bg_array : ndarray
        1D array containing data with float or int type (mg/dL).
    round_to_n_digits : int, optional
        The number of digits to round the result to.

    Returns
    -------
    float
        The calculated Glucose Management Indicator.
    """
    _validate_bg(bg_array)
    mean_bg = common.mean(bg_array)
    gmi = 3.31 + (0.02392 * mean_bg)
    return round(gmi, round_to_n_digits)
def percent_values_by_range(
    bg_array: "np.ndarray[np.float64]",
    lower_bound: int = 1,
    upper_bound: int = 1000,
    lower_bound_operator: object = operator.ge,
    upper_bound_operator: object = operator.lt,
    round_to_n_digits: int = 3,
) -> np.float64:
    """
    Calculate the percent of bg values that are within the specified range.

    Fix: the bound parameters were declared as ``lower_bound: int == 1`` /
    ``upper_bound: int == 1000`` — a comparison expression used as the
    annotation — so the documented defaults never existed and both
    parameters were effectively required.  They now default to 1 and 1000.

    Parameters
    ----------
    bg_array : ndarray
        1D array containing data with float or int type.
    lower_bound : int
        The lower bound of the calculation range.  DEFAULT = 1
    upper_bound : int
        The upper bound of the calculation range.  DEFAULT = 1000
    lower_bound_operator : operator object
        operators include:
            operator.ge (greater than or equal to) DEFAULT
            operator.gt (greater than)
    upper_bound_operator : operator object
        operators include:
            operator.lt (less than) DEFAULT
            operator.le (less than or equal to)
    round_to_n_digits : int
        The number of digits to round the result to. DEFAULT = 3

    Returns
    -------
    float
        The percentage of values in the specified range.
    """
    _validate_bg(bg_array)
    _validate_input(lower_bound, upper_bound)
    # Element-wise comparison masks combined with & give the in-range count.
    n_meet_criteria = sum(lower_bound_operator(bg_array, lower_bound) & upper_bound_operator(bg_array, upper_bound))
    percent_meet_criteria = n_meet_criteria / len(bg_array) * 100
    rounded_percent = np.round(percent_meet_criteria, round_to_n_digits)
    return rounded_percent
def percent_values_ge_70_le_180(bg_array: "np.ndarray[np.float64]", round_to_n_digits: int = 3) -> np.float64:
    """
    Calculate the percent of glucose values that are
    greater-than-or-equal-to (ge) 70 and less-than-or-equal-to (le) 180 mg/dL.

    Parameters
    ----------
    bg_array : ndarray
        1D array containing data with float or int type.
    round_to_n_digits : int
        The number of digits to round the result to. DEFAULT = 3

    Returns
    -------
    float
        The percent of values within the range between 70 and 180 mg/dL.
    """
    return percent_values_by_range(
        bg_array,
        lower_bound=70,
        lower_bound_operator=operator.ge,
        upper_bound=180,
        upper_bound_operator=operator.le,
        round_to_n_digits=round_to_n_digits,
    )
def percent_values_lt_70(bg_array: "np.ndarray[np.float64]", round_to_n_digits: int = 3) -> np.float64:
    """
    Calculate the percent of values less than (lt) 70 mg/dL.

    Parameters
    ----------
    bg_array : ndarray
        1D array containing data with float or int type.
    round_to_n_digits : int, optional
        The number of digits to round the result to.

    Returns
    -------
    float
        The percent of values below 70 mg/dL.
    """
    return percent_values_by_range(
        bg_array,
        lower_bound=1,
        upper_bound=70,
        upper_bound_operator=operator.lt,
        round_to_n_digits=round_to_n_digits,
    )
def percent_values_lt_54(bg_array: "np.ndarray[np.float64]", round_to_n_digits: int = 3) -> np.float64:
    """
    Calculate the percent of values less than (lt) 54 mg/dL.

    Parameters
    ----------
    bg_array : ndarray
        1D array containing data with float or int type.
    round_to_n_digits : int, optional
        The number of digits to round the result to.

    Returns
    -------
    float
        The percent of values less than 54 mg/dL.
    """
    # Note: validation is performed inside percent_values_by_range, so the
    # redundant _validate_bg call the other lt/gt wrappers omit was removed
    # here for consistency (the same exception is still raised on bad input).
    return percent_values_by_range(
        bg_array, lower_bound=1, upper_bound=54, upper_bound_operator=operator.lt, round_to_n_digits=round_to_n_digits,
    )
def percent_values_lt_40(bg_array: "np.ndarray[np.float64]", round_to_n_digits: int = 3) -> np.float64:
    """
    Calculate the percent of values less than (lt) 40 mg/dL.

    Parameters
    ----------
    bg_array : ndarray
        1D array containing data with float or int type.
    round_to_n_digits : int, optional
        The number of digits to round the result to.

    Returns
    -------
    float
        The percent of values less than 40 mg/dL.
    """
    # DOCFIX: the docstring previously said "less than 54 mg/dL" (copy-paste
    # from percent_values_lt_54). Validation happens inside
    # percent_values_by_range, so the redundant _validate_bg call was removed
    # for consistency with the other wrappers.
    return percent_values_by_range(
        bg_array, lower_bound=1, upper_bound=40, upper_bound_operator=operator.lt, round_to_n_digits=round_to_n_digits,
    )
def percent_values_gt_180(bg_array: "np.ndarray[np.float64]", round_to_n_digits: int = 3) -> np.float64:
    """
    Calculate the percent of values greater than (gt) 180 mg/dL.

    NOTE(review): the default upper bound operator is exclusive, so readings
    >= 1000 mg/dL are not counted — confirm _validate_bg guarantees values
    stay below 1000.

    Parameters
    ----------
    bg_array : ndarray
        1D array containing data with float or int type.
    round_to_n_digits : int, optional
        The number of digits to round the result to.

    Returns
    -------
    float
        The percent of values above 180 mg/dL.
    """
    return percent_values_by_range(
        bg_array,
        lower_bound=180,
        lower_bound_operator=operator.gt,
        upper_bound=1000,
        round_to_n_digits=round_to_n_digits,
    )
def percent_values_gt_250(bg_array: "np.ndarray[np.float64]", round_to_n_digits: int = 3) -> np.float64:
    """
    Calculate the percent of values greater than (gt) 250 mg/dL.

    NOTE(review): the default upper bound operator is exclusive, so readings
    >= 1000 mg/dL are not counted — confirm _validate_bg guarantees values
    stay below 1000.

    Parameters
    ----------
    bg_array : ndarray
        1D array containing data with float or int type.
    round_to_n_digits : int, optional
        The number of digits to round the result to.

    Returns
    -------
    float
        The percent of values greater than (gt) 250 mg/dL.
    """
    return percent_values_by_range(
        bg_array,
        lower_bound=250,
        lower_bound_operator=operator.gt,
        upper_bound=1000,
        round_to_n_digits=round_to_n_digits,
    )
def percent_values_gt_300(bg_array: "np.ndarray[np.float64]", round_to_n_digits: int = 3) -> np.float64:
    """
    Calculate the percent of values greater than (gt) 300 mg/dL.

    NOTE(review): the default upper bound operator is exclusive, so readings
    >= 1000 mg/dL are not counted — confirm _validate_bg guarantees values
    stay below 1000.

    Parameters
    ----------
    bg_array : ndarray
        1D array containing data with float or int type.
    round_to_n_digits : int, optional
        The number of digits to round the result to.

    Returns
    -------
    float
        The percent of values greater than (gt) 300 mg/dL.
    """
    return percent_values_by_range(
        bg_array,
        lower_bound=300,
        lower_bound_operator=operator.gt,
        upper_bound=1000,
        round_to_n_digits=round_to_n_digits,
    )
def percent_values_gt_400(bg_array: "np.ndarray[np.float64]", round_to_n_digits: int = 3) -> np.float64:
    """
    Calculate the percent of values greater than (gt) 400 mg/dL.

    NOTE(review): the default upper bound operator is exclusive, so readings
    >= 1000 mg/dL are not counted — confirm _validate_bg guarantees values
    stay below 1000.

    Parameters
    ----------
    bg_array : ndarray
        1D array containing data with float or int type.
    round_to_n_digits : int, optional
        The number of digits to round the result to.

    Returns
    -------
    float
        The percent of values greater than (gt) 400 mg/dL.
    """
    return percent_values_by_range(
        bg_array,
        lower_bound=400,
        lower_bound_operator=operator.gt,
        upper_bound=1000,
        round_to_n_digits=round_to_n_digits,
    )
def episodes(
    bg_array: "np.ndarray[np.float64]", episodes_threshold: int, min_ct_per_ep: int = 3, min_duration: int = 5,
) -> np.float64:
    """
    Calculate the number of episodes for a given set of glucose values based on provided thresholds.

    How the episode count is calculated:
    1. Identify all bg values that are within episode range (below episodes_threshold).
    2. Find each array position that is in range whose next value is not in range. This gives
       the index of the last bg in a potential episode.
    3. The position only counts as an episode if the min_ct_per_ep - 1 values preceding it
       were also in range.

    Parameters
    ----------
    bg_array : ndarray
        1D array containing data with float or int type.
    episodes_threshold : int
        Any bg values below this value will be considered as within the episode.
    min_ct_per_ep : int, optional
        The number of consecutive bg values required in the threshold range to be considered an episode.
    min_duration : int, optional (Not Implemented at this time.)
        The number of minutes expected between each bg value in the array.

    Returns
    -------
    int
        The number of episodes matching input specifications.
    """
    _validate_bg(bg_array)
    # SECURITY/IDIOM FIX: previously this built the boolean expression as a
    # string and ran it through eval(); the mask is now accumulated directly.
    in_range = np.where(bg_array < episodes_threshold, 1, 0)
    # An episode ends where an in-range value is followed by an out-of-range
    # value (np.roll(in_range, -1) looks one step ahead)...
    episode_end = (in_range == 1) & (np.roll(in_range, -1) == 0)
    # ...and only counts when the preceding min_ct_per_ep - 1 values were
    # also in range (np.roll(in_range, offset) looks offset steps behind).
    for offset in range(1, min_ct_per_ep):
        episode_end &= np.roll(in_range, offset) == 1
    episodes_count = np.count_nonzero(in_range[episode_end])
    return episodes_count
def blood_glucose_risk_index(
    bg_array: "np.ndarray[np.float64]", round_to_n_digits: int = 2
) -> Tuple[float, float, float]:
    """
    Calculate the LBGI, HBGI and BGRI within a set of glucose values from <NAME>., & <NAME>. (2009).

    Parameters
    ----------
    bg_array : ndarray
        1D array containing data with float or int type.
    round_to_n_digits : int, optional
        The number of digits to round the result to.

    Returns
    -------
    float
        The LBGI (low blood glucose index) result.
    float
        The HBGI (high blood glucose index) result.
    float
        The BGRI (blood glucose risk index) result, LBGI + HBGI.
    """
    _validate_bg(bg_array)
    # BUGFIX: previously the caller's array was clamped in place
    # (bg_array[bg_array < 1] = 1); work on a clipped copy instead so the
    # input is not mutated. Clamping to 1 handles the edge case BG <= 0,
    # where np.log would be undefined.
    bg = np.clip(bg_array, 1, None)
    transformed_bg = 1.509 * ((np.log(bg) ** 1.084) - 5.381)
    risk_power = 10 * (transformed_bg ** 2)
    # Negative transformed values contribute to low risk, positive to high risk.
    low_risk_bool = transformed_bg < 0
    high_risk_bool = transformed_bg > 0
    rlBG = risk_power * low_risk_bool
    rhBG = risk_power * high_risk_bool
    lbgi = np.mean(rlBG)
    hbgi = np.mean(rhBG)
    bgri = round(lbgi + hbgi, round_to_n_digits)
    return (
        round(lbgi, round_to_n_digits),
        round(hbgi, round_to_n_digits),
        bgri,
    )
def lbgi_risk_score(lbgi: np.float64) -> int:
"""
Calculate the Tidepool Risk Score associated with the LBGI
https://docs.google.com/document/d/1EfIqZPsk_aF6ccm2uxO8Kv6677FIZ7SgjAAX6CmRWOM/
Parameters
----------
lbgi : float
LBGI value calculated from BGRI
Returns
-------
int
The Tidepool LBGI Risk Score.
"""
if lbgi > 10:
risk_score = 4
elif lbgi > 5:
risk_score = 3
elif lbgi > 2.5:
risk_score = 2
elif lbgi > 0:
risk_score = 1
| |
ανάταξις ανάταση ανάτασις
ανάφαση ανάφλεξη ανάχρειο ανάχωμα ανέβασμα ανέγερση ανέγερσις ανέκδοτο ανέλιξη
ανέλκυση ανέλκυσις ανέλο ανέμη ανέμισμα ανένταχτος ανέσα ανέσπερο ανέχεια
ανήρ ανήφορος ανία ανίδρυση ανίδρυσις ανίχνευση ανίψι αναίδεια αναίρεση
αναβάθμισις αναβάθρα αναβάπτιση αναβάπτισις αναβάπτισμα αναβάτης αναβάτρια
αναβίβασις αναβίωμα αναβίωση αναβίωσις αναβαθμίδα αναβαθμίδωση αναβαθμίς
αναβαθμολόγησις αναβαθμός αναβαλλόμενος αναβαπτισμός αναβαπτιστής αναβατήρας
αναβατόριο αναβιβασμός αναβλάστηση αναβλητικότης αναβλητικότητα αναβολέας
αναβολεύς αναβολικά αναβολισμός αναβοσβήσιμο αναβρασμός αναβροχιά αναβρυτήριο
αναγάλλια αναγέλασμα αναγέννηση αναγέννησις αναγγελία αναγκάμι αναγκαίο
αναγκαιότητα αναγκασμός αναγνωρισιμότητα αναγνωσιμότης αναγνωσιμότητα
αναγνωστήριο αναγνωστικό αναγνωστικότητα αναγνώριση αναγνώρισις αναγνώστης
αναγούλα αναγούλιασμα αναγραμματισμός αναγραφέας αναγραφή αναγωγή αναγόμωση
αναγόρευσις αναγύρισμα αναδάσωση αναδάσωσις αναδίπλωση αναδίπλωσις αναδίφηση
αναδαμαλισμός αναδασμός αναδεντράδα αναδεξιμιά αναδεξιμιός αναδευτήρας
αναδημιουργία αναδημοσίευση αναδημοσίευσις αναδιάρθρωση αναδιάρθρωσις
αναδιαμελισμός αναδιανομή αναδιαπραγμάτευση αναδιαρρύθμιση αναδιατύπωση
αναδιοργανωτής αναδιπλασιασμός αναδιφητής αναδουλειά αναδοχή αναδρομή
αναδρομικότητα αναδόμηση αναζήτηση αναζήτησις αναζωογόνηση αναζωογόνησις
αναζωπύρωσις αναθάρρηση αναθάρρησις αναθέρμανση αναθέρμανσις αναθέσμιση
αναθεματισμός αναθεωρητής αναθεωρητισμός αναθεώρηση αναθεώρησις αναθρεφτή
αναθυμίαση αναθυμίασις αναθύμημα αναθύμηση αναιμία αναιρέτης αναισθησία
αναισθητικό αναισθητοποίηση αναισθητοποίησις αναισχυντία ανακάλυψη ανακάτεμα
ανακάτωση ανακήρυξη ανακίνηση ανακαίνιση ανακαινίστρια ανακαινιστής ανακαράς
ανακατάληψη ανακατάταξη ανακατανομή ανακατασκευή ανακατεύθυνση ανακατοσούρας
ανακατωσούρα ανακατωσούρας ανακεράμωση ανακεφαλαίωση ανακεφαλαιοποίηση
ανακλάδωμα ανακλαστήρας ανακοίνωση ανακοινωθέν ανακολουθία ανακομιδή ανακοπή
ανακούφιση ανακρίβεια ανακρίτρια ανακριτής ανακριτική ανακρυστάλλωση
ανακυψιμότητα ανακωχή ανακόντα ανακύκληση ανακύκλωση αναλήθεια αναλαμπή
αναλγησία αναλγητικά αναλγητικό αναληπτικά αναλλαξιά αναλογία αναλογισμός
αναλυτής αναλυτικότητα αναλφαβητισμός αναλόγιο αναλύτρια αναλώσιμα αναμάρτητος
αναμέτρηση αναμεικτήρας αναμελιά αναμετάδοση αναμεταδότης αναμηρυκασμός
αναμονή αναμορφωτήριο αναμορφωτής αναμορφώτρια αναμπουμπούλα αναμόρφωση
ανανάς ανανέωση ανανδρία ανανοηματοδότηση αναντιστοιχία αναντρία αναξιοκρατία
αναξιοπιστία αναξιοπρέπεια αναξιοσύνη αναξιότητα αναξυρίς αναοριοθέτηση
αναπέταση αναπήδημα αναπήδηση αναπήδησις αναπήνιση αναπαημός αναπαλαίωση
αναπαμός αναπαράσταση αναπαραγωγή αναπαραδιά αναπαραδιάρης αναπαραδιάρισσα
αναπαυτήριον αναπεριέλιξη αναπεταρούδια αναπηρία αναπλήρωμα αναπλήρωση
αναπλειστηριασμός αναπληροφόρηση αναπληρωτής αναπληρώτρια αναπλώριση
αναπνιά αναπνοή αναποδιά αναποδογύρισμα αναποκάλυπτος αναπολιτισμός αναπομπή
αναποφασιστικότητα αναπροεξόφληση αναπροσαρμογή αναπρόσληψη αναπτέρωση
αναπτήρ αναπτήρας αναπτηράκι αναπόδιση αναπόληση αναπόλησις αναπότρεπτο
αναπύρωση αναρέσα αναρή αναρθρία αναριθμητισμός αναρμοδιότης αναρμοδιότητα
αναρρίπιση αναρρίπισις αναρρίχηση αναρρίχησις αναρριχήτρια αναρριχητής
αναρρούσα αναρρωτήριο αναρρωτήριον αναρρόφηση αναρρόφησις αναρρύθμιση αναρτήρ
αναρχία αναρχιδία αναρχικότητα αναρχισμός αναρχοκαπιταλισμός
αναρχοκομμούνι αναρχοκουμούνι αναρχοπάνκ αναρχοπίτουρας αναρχοσυνδικαλισμός
αναρχούμενο ανασάλεμα ανασήκωμα ανασαιμιά ανασασμός ανασκάλεμα ανασκέλωμα
ανασκίρτηση ανασκίρτησις ανασκαφέας ανασκαφή ανασκελάς ανασκευή ανασκολοπισμός
ανασκούμπωμα ανασκόπηση ανασκόπησις αναστάτωμα αναστάτωση αναστάτωσις
αναστήλωση αναστήλωσις αναστενάρης αναστενάρια αναστενάρισσα αναστεναγμός
αναστολή αναστοχασμός αναστοχαστικότητα αναστροφή αναστόμωση αναστόφυτο
αναστύλωσις ανασυγκρότηση ανασυγκρότησις ανασυνδυασμός ανασυσκευασία
ανασχεδιασμός ανασχηματισμός ανασόνι ανασύνδεση ανασύνδεσις ανασύνθεση
ανασύνταξη ανασύνταξις ανασύσταση ανασύστασις ανατάραγμα ανατάραξη ανατίμηση
ανατίναξη ανατίναξις αναταξινόμηση αναταραγμός αναταραχή ανατιμητής
ανατοκισμός ανατολή ανατολίστρια ανατολίτης ανατολίτις ανατολίτισσα
ανατολικασιάτης ανατολικοασιάτης ανατολιστής ανατομή ανατομία ανατομείο
ανατοποθέτηση ανατρίχιασμα ανατριχίλα ανατροπέας ανατροπή ανατροφέας ανατροφή
ανατόμος ανατύπωση αναφαγιά αναφιλητό αναφιώτης αναφιώτισσα αναφλεκτήρας
αναφορέας αναφροδισία αναφυλαξία αναφύτευση αναφώνηση αναχαίτιση αναχαιτισμός
αναχρονισμός αναχωμάτωση αναχωρητήριο αναχωρητής αναχωρητισμός αναχώνευση
αναψηλάφηση αναψηλάφησις αναψυκτήριο αναψυκτήριον αναψυκτικό αναψυκτικόν
ανδορρανός ανδράδελφος ανδράδερφος ανδράποδο ανδραγάθημα ανδραγαθία ανδραδέλφη
ανδρεία ανδρείκελο ανδρείκελον ανδρειοσύνη ανδρειότητα ανδριάντας ανδριάς
ανδριαντοποιός ανδρισμός ανδριώτης ανδριώτισσα ανδρογένεση ανδρογυνία
ανδρογόνα ανδρογόνο ανδροειδές ανδροκοίτης ανδροκρατία ανδρολογία ανδρολόγος
ανδρωνίτης ανδρόγυνο ανδρόπαυση ανδρώνας ανεβασιά ανεβατόρι ανεβοκατέβασμα
ανεγκεφαλία ανεδαφικότης ανεδαφικότητα ανεικονικότητα ανειλικρίνεια
ανεκδοτολόγος ανεκτικότης ανεκτικότητα ανελαστικότης ανελαστικότητα
ανελκυστήρ ανελκυστήρας ανεμελιά ανεμική ανεμικό ανεμιστήρ ανεμιστήρας
ανεμιστής ανεμοβλογιά ανεμοβρόχι ανεμοβόρι ανεμογεννήτρια ανεμογκάστρι
ανεμογρίβαδο ανεμογριβάδι ανεμοδείκτης ανεμοδείχτης ανεμοδούρα ανεμοδόχη
ανεμοθραύστης ανεμοθύελλα ανεμοθώρακας ανεμοκυπρίνοι ανεμοκυπρίνος ανεμολάβαρο
ανεμολόγιον ανεμομάζεμα ανεμομάζωμα ανεμομελωδός ανεμοξουριά ανεμοπλάνο
ανεμοπύρωμα ανεμορούφουλας ανεμορρόμβιο ανεμοσκόπιο ανεμοστάτης ανεμοστρόβιλος
ανεμοσυρμή ανεμούρι ανεμούριο ανεμυαλιά ανεμόβροχο ανεμόκαλτσα ανεμόμετρο
ανεμόπτερο ανεμόπτερον ανεμόσκαλα ανεμότρατα ανεμώνα ανεμώνη ανεντιμότητα
ανεξέταση ανεξίτηλο ανεξαρτησία ανεξαρτητοποίηση ανεξαρτητοποίησις ανεξιγνωμία
ανεξικακία ανεπάρκεια ανεπιείκεια ανεπιστημοσύνη ανεπιστρέφων ανεπιτηδειότης
ανεράιδα ανεργία ανεριά ανερούσα ανευθυνοϋπεύθυνος ανευθυνότητα ανευλάβεια
ανευφήμηση ανευφημία ανεφοδιασμός ανεψιά ανεψιός ανεύρεση ανεύρεσις ανεύρυσμα
ανηθικότης ανηθικότητα ανηλικιότητα ανηλικότης ανηλικότητα ανημποριά ανημπόρια
ανηφοριά ανηφόρα ανηφόρι ανηψιά ανηψιός ανθάκι ανθέλικα ανθέλληνας ανθέμιο
ανθήρ ανθήρας ανθί ανθαγορά ανθεθνικότητα ανθεκτικότης ανθεκτικότητα
ανθελμινθικά ανθελονοσιακά ανθεμίδα ανθεστήρια ανθηρότης ανθηρότητα ανθιβόλι
ανθοβαφία ανθοβολή ανθοβολία ανθοβολιά ανθοβοσκός ανθοβόλημα ανθοβόληση
ανθογυάλι ανθοδέσμη ανθοδέτης ανθοδέτρια ανθοδετική ανθοδοχείο ανθοδόχη
ανθοκήπιο ανθοκαλλιέργεια ανθοκεφαλή ανθοκηπευτική ανθοκλάδι ανθοκομία
ανθοκομική ανθοκούλουρο ανθοκράμβη ανθοκόμος ανθολογία ανθολόγημα ανθολόγηση
ανθολόγιο ανθολόγος ανθοπαραγωγή ανθοπαραγωγός ανθοπωλείο ανθοπώλης ανθοπώλιδα
ανθοπώλισσα ανθοστήλη ανθοστολισμός ανθοστόλισμα ανθοταξία ανθοτόπι ανθοφορία
ανθράκευσις ανθράκωση ανθρακίτης ανθρακαποθήκη ανθρακεύω ανθρακιά ανθρακικό
ανθρακοποίησις ανθρακωρυχείο ανθρακωρύχος ανθρακόνημα ανθρωπoειδές ανθρωπάκι
ανθρωπάριο ανθρωπάριον ανθρωπίστρια ανθρωπιά ανθρωπισμός ανθρωπιστής
ανθρωπογνωσία ανθρωποδύναμη ανθρωποθάλασσα ανθρωποθυσία ανθρωποκεντρικότητα
ανθρωποκοινωνιολογία ανθρωποκτονία ανθρωποκυνηγητό ανθρωπολατρία
ανθρωπολεπτό ανθρωπολογία ανθρωπολόγος ανθρωπολόι ανθρωπομάζεμα ανθρωπομάζωμα
ανθρωπομετρία ανθρωπομορφισμός ανθρωποπάζαρο ανθρωποπούλι ανθρωποσφαγή
ανθρωποφάγος ανθρωποφαγία ανθρωποφοβία ανθρωποώρα ανθρωπωνυμία ανθρωπωνυμικό
ανθρωπότης ανθρωπότητα ανθυγιεινότης ανθυγιεινότητα ανθυπίατρος ανθυπίλαρχος
ανθυπαστυνόμος ανθυποβρύχιο ανθυποκτηνίατρος ανθυπολοχαγός ανθυπομειδίαμα
ανθυποπλοίαρχος ανθυποσμηναγός ανθυποτάξη ανθυποφορά ανθυποψήφια ανθυποψήφιος
ανθόγαλα ανθόγαλο ανθόκηπος ανθόκρινο ανθόμελο ανθόνερο ανθόρροια ανθός
ανθότυρο ανθύλλι ανθύλλιο ανθύλλιον ανθύπας ανθύπατος ανθώνας ανιαρότης
ανιδιοτέλεια ανιθαγενής ανικανότης ανικανότητα ανιλίνη ανιμαλισμός ανιματέρ
ανιολότο ανισοκατανομή ανισομέρεια ανισοπεδοποίηση ανισορροπία ανισοσκέλιστος
ανισοτροπία ανισότητα ανιχνευτής ανιχνεύτρια ανιψάκι ανιψίδι ανιψιά ανιψιός
ανιόντες ανκορά ανκόρ ανοησία ανοιγοκλείσιμο ανοικοδόμηση ανοικτότητα
ανοιχτοχέρα ανοιχτόχρωμα ανομία ανομβρία ανομοίωση ανομοίωσις ανομοιογένεια
ανομοιομέρεια ανομοιομορφία ανομοιότης ανομοιότητα ανοξία ανοράκ ανοργανωσιά
ανορεξιά ανορεξιογόνα ανορθογραφία ανορθωτής ανορθώτρια ανοσία ανοσιουργία
ανοσιότης ανοσιότητα ανοσμία ανοσοανεπάρκεια ανοσοαντιδραστικότητα
ανοσογνωσία ανοσοθεραπεία ανοσοκαθήλωση ανοσοκαταστολή ανοσολογία
ανοσοποίηση ανοσοποίησις ανοσοπροσδιορισμός ανοσοσφαιρίνη ανοσοτροποποίηση
ανοσοχρωματογραφία ανοσοϊστοχημεία ανοστιά ανοσφρησία ανουρία ανοφθαλμία ανοχή
αντάμειψη αντάμωμα αντάμωση αντάπτορας αντάρα αντάρτης αντάρτικο αντάρτισσα
αντέγγραφον αντέγκληση αντέγκλησις αντέκθεση αντέκθεσις αντέκταση αντέκτασις
αντένδειξη αντένδειξις αντένσταση αντένστασις αντέρεισμα αντέτι αντέφεση
αντήλιο αντήχηση αντήχησις αντίβαρο αντίβαρον αντίγονον αντίγραφο αντίγραφον
αντίδι αντίδικος αντίδοτο αντίδοτον αντίδραση αντίδρασις αντίδωρο αντίδωρον
αντίζηλος αντίζυγο αντίθεση αντίθεσις αντίθετο αντίκα αντίκενο αντίκλειθρον
αντίκλινο αντίκλινον αντίκοιλο αντίκοιλον αντίκρισμα αντίκρουσις αντίκρυσμα
αντίλαλος αντίλημμα αντίληψη αντίληψις αντίλογος αντίμετρο αντίντερο αντίνυξη
αντίπαλος αντίπαπας αντίπασχα αντίποδας αντίποινα αντίποινο αντίποινον
αντίπραξη αντίπραξις αντίρευμα αντίρρηση αντίρρησις αντίρροπο αντίσκηνο
αντίσταση αντίστασις αντίστιξη αντίστιξις αντίστοιχο αντίστυλο αντίσωμα
αντίτιμον αντίτυπο αντίτυπον αντίφα αντίφαση αντίφραση αντίφωνα αντίφωνο
αντίχαρη αντίχειρας αντίχριστος αντίχτυπος αντίψυχο ανταγωγή ανταγωνίστρια
ανταγωνιστής ανταγωνιστικότης ανταγωνιστικότητα ανταλής ανταληγείς
ανταλλαγή ανταλλακτήριο ανταλλακτικό ανταμοιβή αντανάκλαση αντανάκλασις
αντανακλαστικό ανταπάντηση ανταπάντησις ανταπαίτηση ανταπαίτησις ανταπαιτητής
ανταπεργός ανταποκρίτρια ανταποκρισιμότητα ανταποκριτής ανταπόδειξη ανταπόδοση
ανταπόκριση ανταπόκρισις ανταρσία ανταρτοπόλεμος αντασφάλεια αντασφάλιση
αντασφαλιστής ανταύγεια αντεγγύηση αντεισαγγελέας αντεκδίκηση αντεκδίκησις
αντεμπρησμός αντενέργεια αντενοκάταρτο αντενοκατάρτι αντεξέταση αντεπένδυση
αντεπίθεσις αντεπαγωγή αντεπανάσταση αντεπανάστασις αντεπαναστάτης
αντεπισταλία αντεπιχείρημα αντεράστρια αντερί αντεραστής αντεροβγάλτης
αντευρωπαϊσμός αντευρωπαϊστής αντζουγόπαστα αντζουριά αντζούγα αντζούγια
αντηλάρισμα αντηλιά αντηλιακό αντηρίδα αντηρίς αντηχείο αντηχείον
αντιήρωας αντιαγγειογένεση αντιαγνωστικός αντιαιμοπεταλιακά αντιαιμορραγικά
αντιαλλεργικό αντιαμερικανισμός αντιαναθεωρητής αντιανδρογόνα αντιανεμικό
αντιατομικισμός αντιβίωση αντιβαπτισμός αντιβαρύτητα αντιβασίλισσα
αντιβασιλεία αντιβασιλεύς αντιβασιλιάς αντιβηχικά αντιβιόγραμμα αντιβούισμα
αντιβρόχιο αντιβρόχιον αντιγαμητικό αντιγιβεριλήνη αντιγκέα αντιγκεϊκά
αντιγνωμία αντιγνωσιαρχικός αντιγραφέας αντιγραφή αντιγραφεύς αντιγόνο
αντιδάνειο αντιδήμαρχος αντιδανεισμός αντιδεξιός αντιδημαρχία αντιδημοτικότης
αντιδιαβητικά αντιδιαδήλωση αντιδιαδήλωσις αντιδιαδηλωτής αντιδιανοσαλάτα
αντιδιαστολή αντιδικία αντιδογματικότητα αντιδογματισμός αντιδραστήρας
αντιδραστήριον αντιδραστικότητα αντιδρόμηση αντιδόνημα αντιδόνηση αντιεθνικός
αντιεθνισμός αντιεισαγγελέας αντιεισαγγελεύς αντιελκωτικά αντιεμετικά
αντιεξουσιαστής αντιεπιληπτικά αντιερωτικότητα αντιερωτισμός αντιευρωπαϊσμός
αντιζηλία αντιζυγία αντιζύγι αντιζύγιασμα αντιηλεκτρόνιο αντιημικρανικά
αντιθάλαμος αντιθεϊστής αντιθρησκευτικότητα αντιθρομβωτικά αντιθρομβωτικό
αντιθωράκιση αντιιλιγγικά αντιιμπεριαλισμός αντιισταμινικά αντιισταμινικό
αντικάμαρα αντικέρ αντικέρης αντικίνητρο αντικαθρέφτισμα αντικαθρεφτισμός
αντικανονικότητα αντικαπιταλισμός αντικαπνίστρια αντικαπνιστής αντικατάσκοπος
αντικατάστασις αντικαταβολή αντικαταθλιπτικά αντικαταθλιπτικό
αντικατασκοπία αντικατασκοπεία αντικαταστάτης αντικαταστάτις αντικαταστάτρια
αντικείμενο αντικείμενον αντικειμενικότης αντικειμενικότητα αντικειμενισμός
αντικειμενοποίηση αντικεμαλιστής αντικενό αντικλείδι αντικληρικαλισμός
αντικληρισμός αντικνήμιο αντικνήμιον αντικοινοβουλευτισμός αντικοινωνικότητα
αντικομματισμός αντικομμουνισμός αντικομμουνιστής αντικομουνίστρια
αντικομουνιστής αντικομφορμίστας αντικομφορμίστρια αντικομφορμισμός
αντικουάρκ αντικουνουπικό αντικούκου αντικριστής αντικρυστής αντικυκλών
αντικυριώτης αντιλάμπισμα αντιλήπτορας αντιλήπτωρ αντιλαβή αντιλεξικό
αντιλεϊσμανιακά αντιληπτικότης αντιληπτικότητα αντιληπτότητα αντιλογάριθμος
αντιλογισμός αντιλόπη αντιμάμαλο αντιμέτρηση αντιμέτρησις αντιμήνσιο
αντιμανιακά αντιμαχία αντιμερκελιστής αντιμετάθεση αντιμετάθεσις αντιμετάταξη
αντιμεταρρύθμισις αντιμεταφυσίτης αντιμεταφυσική αντιμεταφυσικός
αντιμεταχώρησις αντιμετώπιση αντιμετώπισις αντιμικροβιακά αντιμιλιταρίστρια
αντιμιλιταριστής αντιμισθία αντιμολία αντιμονή αντιμονίτης αντιμοναρχικός
αντιμυκητιασικά αντιμυοσπασμωδικά αντιμωλία αντιμόνιο αντιμόνιον αντιναύαρχος
αντινομισμός αντινομιστής αντιντετερμινισμός αντιξιφισμός αντιξοότης
αντιοικονομία αντιολίσθηση αντιορός αντιπάθεια αντιπάπας αντιπαγκοσμιοποίηση
αντιπαλότητα αντιπαράδειγμα αντιπαράθεση αντιπαράθεσις αντιπαράσταση
αντιπαράταξη αντιπαράταξις αντιπαραβολή αντιπαρκινσονικά αντιπαροχή
αντιπατριώτης αντιπατριώτισσα αντιπελάργηση αντιπερισπασμός αντιπεριφερειάρχης
αντιπηκτικό αντιπιτυριδικό αντιπλάγια αντιπληθωρισμός αντιπλοίαρχος
αντιποίηση αντιποίησις αντιπολίτευση αντιπολίτευσις αντιπραγματισμός
αντιπροεδρία αντιπροεδρίνα αντιπροπαρασκευή αντιπροσαρμογή αντιπροσφορά
αντιπροσωπεία αντιπροσωπευτικότητα αντιπροσώπευση αντιπροσώπευσις
αντιπρόεδρος αντιπρόσκληση αντιπρόσωπος αντιπρόταση αντιπρότασις αντιπρύτανης
αντιπτέραρχος αντιπτέριση αντιπυρά αντιπυρκαγιά αντιπύραρχος αντιπύραυλος
αντιρρευματικά αντιρρησίας αντιρρόπηση αντιρρόπησις αντιρρύπανση αντισήκωμα
αντισεξουαλικότητα αντισημίτης αντισημίτρια αντισημιτισμός αντισηπτικά
αντισκίαση αντισοβιετισμός αντιστάθμιση αντιστάθμισις αντιστάθμισμα αντιστάτης
αντιστήριξη αντιστήριξις αντισταθμισμός αντιστασιακός αντιστικτική
αντιστοιχία αντιστράτηγος αντιστρεπτικότητα αντιστρεπτότητα αντιστρεψιμότητα
αντιστύλι αντισυμμετρία αντισυνταγματάρχης αντισυνταγματικότης αντισυστημισμός
αντισφαίρισις αντισφαιρίστρια αντισφαιριστής αντισχέδιο αντισχέδιον
αντισύλληψη αντισύμπαν αντιτάσσομαι αντιτάσσω αντιτείχισμα αντιτοξίνη
αντιτορπιλλικό αντιτορπιλλικόν αντιτριβή αντιτρομοκρατία αντιυπερτασικά
αντιφάρμακον αντιφέγγισμα αντιφασίστας αντιφασίστρια αντιφασισμός αντιφασιστής
αντιφατικότητα αντιφεγγιά αντιφεμινίστρια αντιφεμινισμός αντιφεμινιστής
αντιφλεγμονώδες αντιφλεγμονώδη αντιφυλετικός αντιφυματικά αντιφωνία αντιφώνηση
αντιχάος αντιχαιρέτισμα αντιχαιρετισμός αντιχαρακτήρας αντιχολινεργικά
αντιχριστιανισμός αντιψυχωσικά αντιψυχωτικά αντιψύχι αντιύλη αντλία
αντλησιοταμιευτήρας αντλιοστάσιο αντλιωρός αντονομασία αντοχή αντράδελφος
αντράκι αντράκλα αντράλα αντρέ αντρακλοσαλάτα αντραμίδα αντρεία αντρειά
αντρειότητα αντρισμός αντρογυναίκα αντρομίδα αντροσύνη αντροχωρίστρα αντρούλης
αντρών αντσούγα αντσούγια αντωνυμία αντώνυμο αντώνυμον αντώσμωση ανυδρία
ανυπαρξία ανυποκρισία ανυποληψία ανυπομονησία ανυποταγή ανυποταξία ανυπόστατο
ανυστεροβουλία ανυφάντρα ανυφάντρια ανυφαντάρης ανυφαντής ανυψωμός ανυψωτήρ
ανυψωτής ανφάς ανωδομή ανωδομία ανωκύκλωση ανωμαλία ανωμαλιάρης ανωμεριά
ανωνυμογράφος ανωνυμογραφία ανωνυμοτηλεφωνητής ανωορρηξία ανωριμότης
ανωτερότης ανωτερότητα ανωφέλεια ανωφέρεια ανόμημα ανόπτηση ανόρθωση ανόρθωσις
ανόρυξις ανύπαρκτο ανύχι ανύψωση ανύψωσις ανώγαιον | |
p[1]
if node.way in self.point_db:
current = self.point_db[node.way][0]
if current.rlid != node.rlid:
# This can happen for some data in crossings for example
#raise RuntimeError("Node with RLID %s and position %s already exists in the database (%s, %s)" % (node.rlid, latlon_str(node.way), current, node))
_log.warning(f"Node with RLID {node.rlid} and position {latlon_str(node.way)} already"
f" exists in the database ({current}, {node})")
merge_tags(current, node.tags, data_src_name)
else:
self.point_db[node.way] = [ node ]
if do_snap and snap_way is not None:
self._add_node_into_way(snap_way.rlid, p)
return did_snap
def _add_node_into_way(self, rlid, point):
    """Insert a point into the first way segment of the given RLID that
    geometrically contains it, setting the point's dist from its predecessor.

    Logs a warning (and inserts nothing) if no segment contains the point.
    """
    for seg in self.way_db.get(rlid, []):
        for idx in range(1, len(seg.way)):
            prev_point = seg.way[idx - 1]
            is_between, _ = point_between_points(point, prev_point, seg.way[idx], 1e-6)
            if not is_between:
                continue
            # Place the point between its neighbors and derive its running
            # distance from the preceding point.
            point.dist = prev_point.dist + dist2d(prev_point, point)
            seg.way.insert(idx, point)
            return
    _log.warning(f"node {latlon_str(point)} not found in any way segment for RLID {rlid}")
def _split_and_merge(self, way, data_src_name):
    """Merge `way` into the ordered, non-overlapping segment list kept in
    self.way_db for its RLID, splitting existing segments where `way` only
    partially overlaps them and merging tags on the overlapping parts.

    Segments are kept sorted by their points' running `dist` values; all
    before/after/overlap comparisons below are done on `dist`, which
    presumably comes from snapping against shared reference geometry —
    TODO confirm.
    """
    if len(way.way) == 1:
        # skipping (extremely short) ways that were reduced to one point
        return
    if not way.rlid in self.way_db:
        # first segment for rlid
        self.way_db[way.rlid] = [ way ]
        return
    segs = self.way_db[way.rlid]
    # Fast paths: way lies entirely before or entirely after all existing
    # segments, so no splitting is needed.
    if way.way[-1].dist <= segs[0].way[0].dist:
        # way is before existing segments
        segs.insert(0, way)
        return
    if way.way[0].dist >= segs[-1].way[-1].dist:
        # way is after existing segments
        segs.append(way)
        return
    segs_idx = 0
    while segs_idx < len(segs): # we modify segs inside, so can't use for loop
        seg = segs[segs_idx]
        if seg.way[-1].dist <= way.way[0].dist:
            # seg is before way
            segs_idx += 1
            continue
        if seg.way[0].dist >= way.way[-1].dist:
            # seg is after way, no overlap
            segs.insert(segs_idx, way)
            #print("insert no overlap")
            break
        # way starts somewhere inside seg, scan to start of way
        #print("way ", way)
        #print("matching seg", seg)
        seg_idx = 0
        while seg_idx < len(seg.way):
            if seg.way[seg_idx].dist >= way.way[0].dist:
                if seg.way[seg_idx].dist > way.way[0].dist:
                    # start of way is a new point, insert
                    seg.way.insert(seg_idx, way.way[0])
                break
            seg_idx += 1
        if seg_idx > 0:
            # split out segment which is before way
            # (the split keeps one shared point: seg_copy ends where seg
            # now starts)
            seg_copy = seg.make_copy_new_way(seg.way[:seg_idx+1])
            segs.insert(segs_idx, seg_copy)
            segs_idx += 1
            seg.way = seg.way[seg_idx:]
            #print("split before")
        # now seg starts at same point as way
        assert seg.way[0] == way.way[0]
        # way may have new points, insert those into seg, if any
        # (walk both point lists in lockstep by dist; a way point with a
        # smaller dist than the current seg point is new)
        seg_idx = 0
        way_idx = 0
        while seg_idx < len(seg.way) and way_idx < len(way.way):
            if seg.way[seg_idx].dist > way.way[way_idx].dist:
                seg.way.insert(seg_idx, way.way[way_idx])
            seg_idx += 1
            way_idx += 1
        if seg_idx < len(seg.way):
            # split out segment which is after way
            seg_copy = seg.make_copy_new_way(seg.way[seg_idx-1:])
            segs.insert(segs_idx + 1, seg_copy)
            seg.way = seg.way[:seg_idx]
            assert seg.way[0] == way.way[0]
        next_way = None
        if len(way.way) > len(seg.way):
            # split way
            # (the remainder continues past seg and is merged on the next
            # loop iteration)
            next_way = way.make_copy_new_way(way.way[len(seg.way)-1:])
            way.way = way.way[:len(seg.way)]
        # merge tags
        assert seg.way[-1] == way.way[-1]
        assert seg.way[0] == way.way[0] and seg.way[-1] == way.way[-1]
        merge_tags(seg, way.tags, data_src_name)
        #print("insert with split")
        if next_way is None:
            break
        way = next_way
        if way.way[0].dist >= segs[-1].way[-1].dist:
            # special case when next_way is last
            segs.append(way)
            break
def insert_rlid_way(self, way, data_src_name, debug_ways=None):
    """Snap a way onto the reference geometry and merge the resulting pieces
    into way_db, optionally collecting copies of the adapted pieces into
    debug_ways and running the self-test if enabled.
    """
    if way.rlid not in self._ref_way_db:
        _log.debug(f"Skipping RLID {way.rlid} (not in reference geometry)")
        return
    _, adapted = self._adapt_way_into_reference_geometry(way, data_src_name)
    for adapted_way in adapted:
        if debug_ways is not None:
            # keep a deep-enough copy so later merging can't mutate it
            debug_ways.append(adapted_way.make_copy_new_way(copy_way(adapted_way.way)))
        self._split_and_merge(adapted_way, data_src_name)
    if self._perform_self_testing and way.rlid in self.way_db:
        self._test_segment(self.way_db[way.rlid])
def _test_segment(self, segs):
    """Self-test a list of segments for one RLID: segments must be in dist
    order, points must be at least POINT_SNAP_DISTANCE apart, and every
    point's dist must match the corresponding reference-way point.

    Raises RuntimeError on any violation.
    """
    # Segments must be ordered and non-overlapping by dist.
    it = iter(segs)
    prev = next(it)
    for seg in it:
        if seg.way[0].dist < prev.way[-1].dist:
            raise RuntimeError("Bad order")
        prev = seg
    for seg in segs:
        assert len(seg.way) >= 2
        ref_way = self._ref_gs.find_reference_way(seg.way[0], seg.rlid)

        def dump_debug():
            # Log the full geometry context before raising (previously this
            # 4-line dump was duplicated at every raise site).
            _log.error(f"ref.way: {ref_way.way}")
            _log.error(f"seg.way: {seg.way}")
            _log.error(f"ref_way: {ref_way}")
            _log.error(f"seg : {seg}")

        # Align the reference way to the segment's first point.
        ref_idx = 0
        while ref_idx < len(ref_way.way) and ref_way.way[ref_idx] != seg.way[0]:
            ref_idx += 1
        assert ref_idx < len(ref_way.way)
        prev = None
        for p in seg.way:
            if prev is not None:
                dist = dist2d(prev, p)
                if dist < self.POINT_SNAP_DISTANCE:
                    dump_debug()
                    raise RuntimeError("Point closer placed than snap distance %s" % dist)
            if ref_idx == len(ref_way.way):
                dump_debug()
                raise RuntimeError("More points in segment than in reference way")
            if p.dist != ref_way.way[ref_idx].dist:
                dump_debug()
                raise RuntimeError("Dist mismatch got %s expected %s (ref_idx %s)" % (p.dist, ref_way.way[ref_idx].dist, ref_idx))
            ref_idx += 1
            prev = p
def _test_way_dist(self, way, allow_unset=False):
    """Self-test one way: no duplicate or too-close points, and each point's
    dist must equal the accumulated 2D distance (within 1e-6).

    With allow_unset=True a dist of -1 means "not set yet" and is skipped in
    the comparison. Raises RuntimeError on any violation.
    """
    points = way.way
    ref_dist = points[0].dist
    if allow_unset and ref_dist == -1:
        ref_dist = 0
    else:
        assert ref_dist >= 0
    prev = points[0]
    for p in points[1:]:
        if prev == p:
            _log.info(f"{way.way}")
            _log.info(f"{way}")
            raise RuntimeError("Duplicate point %s" % p)
        dist = dist2d(prev, p)
        if dist < self.POINT_SNAP_DISTANCE:
            _log.info(f"{way.way}")
            _log.info(f"{way}")
            raise RuntimeError("Point closer placed than snap distance at %s in ref_way %s" % (p, dist))
        ref_dist += dist
        if (not allow_unset or p.dist != -1) and abs(p.dist - ref_dist) > 1e-6:
            _log.info(f"{way.way}")
            _log.info(f"{way}")
            raise RuntimeError("Bad dist in ref_way %s (expected %s got %s)" % (p, ref_dist, p.dist))
        if p.dist != -1:
            # re-anchor on the stored dist to avoid accumulating float error
            ref_dist = p.dist
        prev = p
def _retry_adapt_way_extending_reference_geometry(self, way):
    """Retry snapping a way whose start/end reaches past its reference
    geometry by extending the reference geometry with the unsnapped points.

    Returns True if the reference geometry was extended (caller should then
    retry the normal snap), False if snapping failed in a way extension
    can't fix (no point snapped at all, or unsnapped points in the middle).
    """
    # will not work for closed loops, or self-crossing stuff
    new_way = []
    # snapped[i] is True only when point i snapped to this way's own RLID
    # reference geometry.
    snapped = []
    ref_way = None
    snap_count = 0
    for way_idx, point in enumerate(way.way):
        p, rway = self._ref_gs.snap_waypoint_into_geometry(way, way_idx, self.POINT_SNAP_DISTANCE, self.MAX_SNAP_DISTANCE)
        if p is not None:
            new_way.append(p)
            snapped.append(True)
            ref_way = rway
            snap_count += 1
        else:
            # snap to other way if possible, using short snap distance
            _, p, rway = self._ref_gs.snap_point_into_geometry(point, self.POINT_SNAP_DISTANCE, self.POINT_SNAP_DISTANCE)
            if p is not None:
                new_way.append(p)
            else:
                new_way.append(point)
            snapped.append(False) # only count points snap to self RLID
    if ref_way is None:
        # not a single point snapped to own RLID geometry; extension can't help
        _log.warning(f"Way with RLID {way.rlid} could not be snapped to reference geometry")
        return False
    assert ref_way.rlid == way.rlid
    _log.warning(f"must extend reference geometry for RLID {way.rlid} (only {snap_count} "
                 f"of {len(way.way)} points could be snapped)")
    # first_snap/last_snap delimit the snapped core of the way; everything
    # before/after it is the candidate extension.
    first_snap = 0
    for idx, is_snap in enumerate(snapped):
        if is_snap:
            first_snap = idx
            break
    last_snap = len(new_way)
    for idx in range(0, len(snapped)):
        if snapped[len(snapped) - 1 - idx]:
            last_snap = len(snapped) - 1 - idx
            break
    _log.debug(f"snapped {snapped}")
    _log.debug(f"snappoints {first_snap} {last_snap}")
    for way_idx, point in enumerate(way.way):
        if way_idx <= first_snap or way_idx >= last_snap:
            continue
        if not snapped[way_idx]:
            # this means snap failure in the middle too not just an extension problem
            _log.info(f"Way with RLID {way.rlid} could not be snapped to reference geometry")
            return False
    # Collect the leading and trailing unsnapped runs.
    extend_way_start = []
    extend_way_end = []
    for idx, point in enumerate(new_way):
        if idx < first_snap:
            extend_way_start.append(point)
        if idx > last_snap:
            extend_way_end.append(point)
    current_segs = self.way_db.get(way.rlid, [])
    if len(extend_way_start) > 0:
        # include the first snapped point so the extension connects to the
        # existing geometry
        extend_way_start.append(new_way[first_snap])
        if not self._ref_gs.extend_geometry(ref_way, extend_way_start, current_segs):
            return False
    if len(extend_way_end) > 0:
        # include the last snapped point, same reason as above
        extend_way_end.insert(0, new_way[last_snap])
        if not self._ref_gs.extend_geometry(ref_way, extend_way_end, current_segs):
            return False
    return True
def _adapt_way_into_reference_geometry(self, way, data_src_name, is_retry=False):
# first snap each point of the way into the existing geometry
way.way = remove_short_segments_and_redundant_points(way, self.POINT_SNAP_DISTANCE)
if len(way.way) == 1:
_log.debug(f"RLID {way.rlid} reduced to one point")
return None, [ way ]
new_way = []
prev = None
max_snap_distance = self.MAX_SNAP_DISTANCE
for way_idx, point in enumerate(way.way):
p, ref_way = self._ref_gs.snap_waypoint_into_geometry(way, way_idx, self.POINT_SNAP_DISTANCE, max_snap_distance, new_way)
if p is None:
if is_retry:
raise RuntimeError("Way with RLID %s %s has no existing geometry within %s meters" % (way.rlid, latlon_str(point), max_snap_distance))
success = self._retry_adapt_way_extending_reference_geometry(way)
if success:
return self._adapt_way_into_reference_geometry(way, data_src_name, is_retry=True)
if max_snap_distance < self.EMERGENCY_SNAP_DISTANCE:
max_snap_distance = self.EMERGENCY_SNAP_DISTANCE
p, ref_way = self._ref_gs.snap_waypoint_into_geometry(way, way_idx, self.POINT_SNAP_DISTANCE, max_snap_distance, new_way)
if p is None:
raise RuntimeError("Way with RLID %s %s has no existing geometry within %s meters" % (way.rlid, latlon_str(point), max_snap_distance))
if self._perform_self_testing:
self._test_way_dist(ref_way)
if p != prev:
# sometimes close points are merged to the same position
assert p.dist >= 0
new_way.append(p)
prev = p
if max_snap_distance == self.EMERGENCY_SNAP_DISTANCE:
_log.warning(f"had to use emergency snap distance ({max_snap_distance}m) for {way.rlid} to get it "
f"to match reference geometry")
way.way = new_way
if len(way.way) == 1:
#_log.info("RLID %s reduced to a point" % way.rlid)
return ref_way, [ way ]
if ref_way.way[0] == ref_way.way[-1]:
# closed loop special cases
assert len(ref_way.way) > 2
closed_loop = False
if way.way[0] == way.way[-1]:
assert len(way.way) > 2
closed_loop = True
way.way = way.way[:-1]
elif way.way[-1] == ref_way.way[0]:
# make sure we end at max dist rather than min dist
way.way[-1] = ref_way.way[-1]
elif way.way[0].dist > way.way[-1].dist:
# the way goes past split point of ref_way, split up way
for idx, p in enumerate(way.way):
if p == ref_way.way[0]:
way_part1 = way.way[:idx] + [ ref_way.way[-1] | |
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version 3.10.1-0-g8feb16b3)
## http://www.wxformbuilder.org/
##
## PLEASE DO *NOT* EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
import wx.grid
###########################################################################
## Class SettingsDialogBase
###########################################################################
class SettingsDialogBase ( wx.Dialog ):
    """Top-level shell of the InteractiveHtmlBom settings dialog.

    wxFormBuilder-generated; contains no child widgets itself.
    NOTE(review): presumably a SettingsDialogPanel is inserted by
    hand-written code elsewhere -- confirm against the caller.
    """

    def __init__( self, parent ):
        # STAY_ON_TOP keeps the dialog above the host application's windows.
        wx.Dialog.__init__ ( self, parent, id = wx.ID_ANY, title = u"InteractiveHtmlBom", pos = wx.DefaultPosition, size = wx.Size( 463,497 ), style = wx.DEFAULT_DIALOG_STYLE|wx.STAY_ON_TOP|wx.BORDER_DEFAULT )

        self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )

        self.Centre( wx.BOTH )

    def __del__( self ):
        pass
###########################################################################
## Class SettingsDialogPanel
###########################################################################
class SettingsDialogPanel ( wx.Panel ):
    """Main panel of the settings dialog: a notebook of settings pages plus
    the Save / Generate BOM / Cancel button row.

    wxFormBuilder-generated. The OnSave / OnGenerateBom / OnExit handlers
    are virtual -- override them in a derived class.
    """

    def __init__( self, parent, id = wx.ID_ANY, pos = wx.DefaultPosition, size = wx.Size( 400,300 ), style = wx.TAB_TRAVERSAL, name = wx.EmptyString ):
        wx.Panel.__init__ ( self, parent, id = id, pos = pos, size = size, style = style, name = name )

        bSizer20 = wx.BoxSizer( wx.VERTICAL )

        # Notebook that holds the individual settings pages.
        # NOTE(review): no pages are added here; presumably hand-written
        # code adds them after construction -- confirm.
        self.notebook = wx.Notebook( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.NB_TOP|wx.BORDER_DEFAULT )
        bSizer20.Add( self.notebook, 1, wx.EXPAND |wx.ALL, 5 )

        # Bottom row of action buttons.
        bSizer39 = wx.BoxSizer( wx.HORIZONTAL )

        self.saveSettingsBtn = wx.Button( self, wx.ID_ANY, u"Save current settings...", wx.DefaultPosition, wx.DefaultSize, 0|wx.BORDER_DEFAULT )
        bSizer39.Add( self.saveSettingsBtn, 0, wx.ALL, 5 )

        # Stretchable spacer (proportion 1) pushes the remaining buttons right.
        bSizer39.Add( ( 50, 0), 1, wx.EXPAND, 5 )

        self.generateBomBtn = wx.Button( self, wx.ID_ANY, u"Generate BOM", wx.DefaultPosition, wx.DefaultSize, 0|wx.BORDER_DEFAULT )
        # Default button: activated by the Enter key.
        self.generateBomBtn.SetDefault()
        bSizer39.Add( self.generateBomBtn, 0, wx.ALL, 5 )

        self.cancelBtn = wx.Button( self, wx.ID_CANCEL, u"Cancel", wx.DefaultPosition, wx.DefaultSize, 0|wx.BORDER_DEFAULT )
        bSizer39.Add( self.cancelBtn, 0, wx.ALL, 5 )

        bSizer20.Add( bSizer39, 0, wx.EXPAND, 5 )

        self.SetSizer( bSizer20 )
        self.Layout()

        # Connect Events
        self.saveSettingsBtn.Bind( wx.EVT_BUTTON, self.OnSave )
        self.generateBomBtn.Bind( wx.EVT_BUTTON, self.OnGenerateBom )
        self.cancelBtn.Bind( wx.EVT_BUTTON, self.OnExit )

    def __del__( self ):
        pass

    # Virtual event handlers, override them in your derived class
    def OnSave( self, event ):
        event.Skip()

    def OnGenerateBom( self, event ):
        event.Skip()

    def OnExit( self, event ):
        event.Skip()
###########################################################################
## Class HtmlSettingsPanelBase
###########################################################################
class HtmlSettingsPanelBase ( wx.Panel ):
    """Settings page for HTML output options (dark mode, visible layers,
    board rotation, BOM checkboxes/views, compression, browser launch).

    wxFormBuilder-generated. OnBoardRotationSlider is virtual -- override
    it in a derived class.
    """

    def __init__( self, parent, id = wx.ID_ANY, pos = wx.DefaultPosition, size = wx.Size( -1,-1 ), style = wx.TAB_TRAVERSAL, name = wx.EmptyString ):
        wx.Panel.__init__ ( self, parent, id = id, pos = pos, size = size, style = style, name = name )

        b_sizer = wx.BoxSizer( wx.VERTICAL )

        # Simple on/off rendering options.
        self.darkModeCheckbox = wx.CheckBox( self, wx.ID_ANY, u"Dark mode", wx.DefaultPosition, wx.DefaultSize, 0 )
        b_sizer.Add( self.darkModeCheckbox, 0, wx.ALL, 5 )

        self.showPadsCheckbox = wx.CheckBox( self, wx.ID_ANY, u"Show footprint pads", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.showPadsCheckbox.SetValue(True)
        b_sizer.Add( self.showPadsCheckbox, 0, wx.ALL, 5 )

        self.showFabricationCheckbox = wx.CheckBox( self, wx.ID_ANY, u"Show fabrication layer", wx.DefaultPosition, wx.DefaultSize, 0 )
        b_sizer.Add( self.showFabricationCheckbox, 0, wx.ALL, 5 )

        self.showSilkscreenCheckbox = wx.CheckBox( self, wx.ID_ANY, u"Show silkscreen", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.showSilkscreenCheckbox.SetValue(True)
        b_sizer.Add( self.showSilkscreenCheckbox, 0, wx.ALL, 5 )

        self.highlightPin1Checkbox = wx.CheckBox( self, wx.ID_ANY, u"Highlight first pin", wx.DefaultPosition, wx.DefaultSize, 0 )
        b_sizer.Add( self.highlightPin1Checkbox, 0, wx.ALL, 5 )

        self.continuousRedrawCheckbox = wx.CheckBox( self, wx.ID_ANY, u"Continuous redraw on drag", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.continuousRedrawCheckbox.SetValue(True)
        b_sizer.Add( self.continuousRedrawCheckbox, 0, wx.ALL, 5 )

        # Board-rotation row: caption on the left, live degree readout on
        # the right, slider underneath.
        bSizer18 = wx.BoxSizer( wx.VERTICAL )

        bSizer19 = wx.BoxSizer( wx.HORIZONTAL )

        self.m_boardRotationLabel = wx.StaticText( self, wx.ID_ANY, u"Board rotation", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_boardRotationLabel.Wrap( -1 )

        bSizer19.Add( self.m_boardRotationLabel, 0, wx.ALL, 5 )

        bSizer19.Add( ( 0, 0), 1, wx.EXPAND, 5 )

        # ST_NO_AUTORESIZE keeps the readout a fixed width as the number changes.
        self.rotationDegreeLabel = wx.StaticText( self, wx.ID_ANY, u"0", wx.DefaultPosition, wx.Size( 30,-1 ), wx.ALIGN_RIGHT|wx.ST_NO_AUTORESIZE )
        self.rotationDegreeLabel.Wrap( -1 )

        bSizer19.Add( self.rotationDegreeLabel, 0, wx.ALL, 5 )

        bSizer19.Add( ( 8, 0), 0, 0, 5 )

        bSizer18.Add( bSizer19, 1, wx.EXPAND, 5 )

        # Slider range is -36..36; presumably scaled to degrees by the
        # handler (e.g. x5 for +/-180) -- TODO confirm in derived class.
        self.boardRotationSlider = wx.Slider( self, wx.ID_ANY, 0, -36, 36, wx.DefaultPosition, wx.DefaultSize, wx.SL_HORIZONTAL )
        bSizer18.Add( self.boardRotationSlider, 0, wx.ALL|wx.EXPAND, 5 )

        b_sizer.Add( bSizer18, 0, wx.EXPAND, 5 )

        # Comma-separated list of checkbox column names for the BOM table.
        sbSizer31 = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u"Checkboxes" ), wx.HORIZONTAL )

        self.bomCheckboxesCtrl = wx.TextCtrl( sbSizer31.GetStaticBox(), wx.ID_ANY, u"Sourced,Placed", wx.DefaultPosition, wx.DefaultSize, 0 )
        sbSizer31.Add( self.bomCheckboxesCtrl, 1, wx.ALL, 5 )

        b_sizer.Add( sbSizer31, 0, wx.ALL|wx.EXPAND, 5 )

        # Default layout selections (index 1 in each radio box).
        bomDefaultViewChoices = [ u"BOM only", u"BOM left, drawings right", u"BOM top, drawings bottom" ]
        self.bomDefaultView = wx.RadioBox( self, wx.ID_ANY, u"BOM View", wx.DefaultPosition, wx.DefaultSize, bomDefaultViewChoices, 1, wx.RA_SPECIFY_COLS )
        self.bomDefaultView.SetSelection( 1 )
        b_sizer.Add( self.bomDefaultView, 0, wx.ALL|wx.EXPAND, 5 )

        layerDefaultViewChoices = [ u"Front only", u"Front and Back", u"Back only" ]
        self.layerDefaultView = wx.RadioBox( self, wx.ID_ANY, u"Layer View", wx.DefaultPosition, wx.DefaultSize, layerDefaultViewChoices, 1, wx.RA_SPECIFY_COLS )
        self.layerDefaultView.SetSelection( 1 )
        b_sizer.Add( self.layerDefaultView, 0, wx.ALL|wx.EXPAND, 5 )

        sbSizer10 = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u"Miscellaneous" ), wx.VERTICAL )

        self.compressionCheckbox = wx.CheckBox( sbSizer10.GetStaticBox(), wx.ID_ANY, u"Enable compression", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.compressionCheckbox.SetValue(True)
        sbSizer10.Add( self.compressionCheckbox, 0, wx.ALL, 5 )

        self.openBrowserCheckbox = wx.CheckBox( sbSizer10.GetStaticBox(), wx.ID_ANY, u"Open browser", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.openBrowserCheckbox.SetValue(True)
        sbSizer10.Add( self.openBrowserCheckbox, 0, wx.ALL, 5 )

        b_sizer.Add( sbSizer10, 1, wx.EXPAND, 5 )

        self.SetSizer( b_sizer )
        self.Layout()
        b_sizer.Fit( self )

        # Connect Events
        self.boardRotationSlider.Bind( wx.EVT_SLIDER, self.OnBoardRotationSlider )

    def __del__( self ):
        pass

    # Virtual event handlers, override them in your derived class
    def OnBoardRotationSlider( self, event ):
        event.Skip()
###########################################################################
## Class GeneralSettingsPanelBase
###########################################################################
class GeneralSettingsPanelBase ( wx.Panel ):
def __init__( self, parent, id = wx.ID_ANY, pos = wx.DefaultPosition, size = wx.Size( -1,-1 ), style = wx.TAB_TRAVERSAL, name = wx.EmptyString ):
wx.Panel.__init__ ( self, parent, id = id, pos = pos, size = size, style = style, name = name )
bSizer32 = wx.BoxSizer( wx.VERTICAL )
sbSizer6 = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u"Bom destination" ), wx.VERTICAL )
fgSizer1 = wx.FlexGridSizer( 0, 2, 0, 0 )
fgSizer1.AddGrowableCol( 1 )
fgSizer1.SetFlexibleDirection( wx.BOTH )
fgSizer1.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.m_staticText8 = wx.StaticText( sbSizer6.GetStaticBox(), wx.ID_ANY, u"Directory", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText8.Wrap( -1 )
fgSizer1.Add( self.m_staticText8, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
self.bomDirPicker = wx.DirPickerCtrl( sbSizer6.GetStaticBox(), wx.ID_ANY, wx.EmptyString, u"Select bom folder", wx.DefaultPosition, wx.DefaultSize, wx.DIRP_SMALL|wx.DIRP_USE_TEXTCTRL|wx.BORDER_SIMPLE )
fgSizer1.Add( self.bomDirPicker, 1, wx.ALIGN_CENTER_VERTICAL|wx.ALL|wx.EXPAND, 5 )
self.m_staticText9 = wx.StaticText( sbSizer6.GetStaticBox(), wx.ID_ANY, u"Name format", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText9.Wrap( -1 )
fgSizer1.Add( self.m_staticText9, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
bSizer20 = wx.BoxSizer( wx.HORIZONTAL )
self.fileNameFormatTextControl = wx.TextCtrl( sbSizer6.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer20.Add( self.fileNameFormatTextControl, 1, wx.ALIGN_CENTER_VERTICAL|wx.BOTTOM|wx.LEFT|wx.TOP, 5 )
self.m_bpButton5 = wx.BitmapButton( sbSizer6.GetStaticBox(), wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW|0 )
self.m_bpButton5.SetMinSize( wx.Size( 30,30 ) )
bSizer20.Add( self.m_bpButton5, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
fgSizer1.Add( bSizer20, 1, wx.EXPAND, 5 )
sbSizer6.Add( fgSizer1, 1, wx.EXPAND, 5 )
bSizer32.Add( sbSizer6, 0, wx.ALL|wx.EXPAND, 5 )
sbSizer9 = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u"Additional pcb data" ), wx.HORIZONTAL )
self.includeTracksCheckbox = wx.CheckBox( sbSizer9.GetStaticBox(), wx.ID_ANY, u"Include tracks/zones", wx.DefaultPosition, wx.DefaultSize, 0 )
sbSizer9.Add( self.includeTracksCheckbox, 1, wx.ALL, 5 )
self.includeNetsCheckbox = wx.CheckBox( sbSizer9.GetStaticBox(), wx.ID_ANY, u"Include nets", wx.DefaultPosition, wx.DefaultSize, 0 )
sbSizer9.Add( self.includeNetsCheckbox, 1, wx.ALL, 5 )
bSizer32.Add( sbSizer9, 0, wx.ALL|wx.EXPAND, 5 )
sortingSizer = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u"Component sort order" ), wx.VERTICAL )
bSizer4 = wx.BoxSizer( wx.HORIZONTAL )
bSizer6 = wx.BoxSizer( wx.VERTICAL )
componentSortOrderBoxChoices = []
self.componentSortOrderBox = wx.ListBox( sortingSizer.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, componentSortOrderBoxChoices, wx.LB_SINGLE|wx.BORDER_SIMPLE )
bSizer6.Add( self.componentSortOrderBox, 1, wx.ALL|wx.EXPAND, 5 )
bSizer4.Add( bSizer6, 1, wx.EXPAND, 5 )
bSizer5 = wx.BoxSizer( wx.VERTICAL )
self.m_btnSortUp = wx.BitmapButton( sortingSizer.GetStaticBox(), wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW|0 )
self.m_btnSortUp.SetBitmap( wx.NullBitmap )
self.m_btnSortUp.SetMinSize( wx.Size( 30,30 ) )
bSizer5.Add( self.m_btnSortUp, 0, wx.ALL, 5 )
self.m_btnSortDown = wx.BitmapButton( sortingSizer.GetStaticBox(), wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW|0 )
self.m_btnSortDown.SetBitmap( wx.NullBitmap )
self.m_btnSortDown.SetMinSize( wx.Size( 30,30 ) )
bSizer5.Add( self.m_btnSortDown, 0, wx.ALL, 5 )
self.m_btnSortAdd = wx.BitmapButton( sortingSizer.GetStaticBox(), wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW|0 )
self.m_btnSortAdd.SetMinSize( wx.Size( 30,30 ) )
bSizer5.Add( self.m_btnSortAdd, 0, wx.ALL, 5 )
self.m_btnSortRemove = wx.BitmapButton( sortingSizer.GetStaticBox(), wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW|0 )
self.m_btnSortRemove.SetMinSize( wx.Size( 30,30 ) )
bSizer5.Add( self.m_btnSortRemove, 0, wx.ALL, 5 )
bSizer4.Add( bSizer5, 0, 0, 5 )
sortingSizer.Add( bSizer4, 1, wx.EXPAND, 5 )
bSizer32.Add( sortingSizer, 1, wx.ALL|wx.EXPAND, 5 )
blacklistSizer = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u"Component blacklist" ), wx.VERTICAL )
bSizer412 = wx.BoxSizer( wx.HORIZONTAL )
bSizer612 = wx.BoxSizer( wx.VERTICAL )
blacklistBoxChoices = []
self.blacklistBox = wx.ListBox( blacklistSizer.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, blacklistBoxChoices, wx.LB_SINGLE|wx.LB_SORT|wx.BORDER_SIMPLE )
bSizer612.Add( self.blacklistBox, 1, wx.ALL|wx.EXPAND, 5 )
bSizer412.Add( bSizer612, 1, wx.EXPAND, 5 )
bSizer512 = wx.BoxSizer( wx.VERTICAL )
self.m_btnBlacklistAdd = wx.BitmapButton( blacklistSizer.GetStaticBox(), wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW|0 )
self.m_btnBlacklistAdd.SetMinSize( wx.Size( 30,30 ) )
bSizer512.Add( self.m_btnBlacklistAdd, 0, wx.ALL, 5 )
self.m_btnBlacklistRemove = wx.BitmapButton( blacklistSizer.GetStaticBox(), wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BU_AUTODRAW|0 )
self.m_btnBlacklistRemove.SetMinSize( wx.Size( 30,30 ) )
bSizer512.Add( self.m_btnBlacklistRemove, 0, wx.ALL, 5 )
bSizer412.Add( bSizer512, 0, 0, 5 )
blacklistSizer.Add( bSizer412, 1, wx.EXPAND, 5 )
self.m_staticText1 = wx.StaticText( blacklistSizer.GetStaticBox(), wx.ID_ANY, u"Globs are supported, e.g. MH*", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText1.Wrap( -1 )
blacklistSizer.Add( self.m_staticText1, 0, wx.ALL, 5 )
self.blacklistVirtualCheckbox = wx.CheckBox( blacklistSizer.GetStaticBox(), wx.ID_ANY, u"Blacklist virtual components", wx.DefaultPosition, wx.DefaultSize, 0 )
self.blacklistVirtualCheckbox.SetValue(True)
blacklistSizer.Add( self.blacklistVirtualCheckbox, 0, wx.ALL, 5 )
self.blacklistEmptyValCheckbox = wx.CheckBox( blacklistSizer.GetStaticBox(), wx.ID_ANY, u"Blacklist components with empty value", wx.DefaultPosition, wx.DefaultSize, 0 )
blacklistSizer.Add( | |
# examples/easygame.py
def degrees(d):
    """Convert degrees to radians.

    Note: despite the name, this is the *inverse* of ``math.degrees`` --
    it takes an angle in degrees and returns radians (like ``math.radians``).
    The name is kept for backward compatibility with existing callers.

    Arguments:
    d -- Angle in degrees.
    """
    import math
    # Delegate to the standard library instead of hand-rolling d / 180 * pi.
    return math.radians(d)
def rotate(vector, angle):
    """Rotate a 2D vector (x, y) counter-clockwise by *angle* radians."""
    import math
    vx, vy = vector
    c = math.cos(angle)
    s = math.sin(angle)
    # Standard 2D rotation matrix applied to (vx, vy).
    return (vx * c - vy * s, vx * s + vy * c)
class EasyGameError(Exception):
    """Base exception type: every error raised by this module is one of these."""
class _Camera:
def __init__(self, center, position, rotation, zoom):
self.center = center
self.position = position
self.rotation = rotation
self.zoom = zoom
class _Context:
    """Module-wide mutable state, used only through the ``_ctx`` singleton
    below.

    The attributes are class-level defaults; ``open_window`` rebinds them
    on the shared instance, so the mutable class attributes are never
    shared across "instances" in practice (there is exactly one).
    """
    _win = None      # the single pyglet window, or None when no window is open
    _fps = 60        # frame-rate cap used by next_frame()
    _events = []     # events collected since the last poll_events() call
    _camera = _Camera((0, 0), (0, 0), 0, 1)  # current camera transform
    _saved_cameras = []  # NOTE(review): unused in this chunk; presumably a push/pop camera stack -- confirm
    _channels = {}   # presumably audio channels keyed by name -- TODO confirm
    _fonts = {}      # presumably loaded fonts keyed by name -- TODO confirm
# The one and only context instance; all module functions operate on it.
_ctx = _Context()
class CloseEvent:
    """Emitted when the user clicks the window's close (X) button. No fields."""
# Lazily-built mapping from pyglet key / mouse-button constants to the
# string names exposed on this module's event objects.
_symbol_dict = None
def _symbol_to_string(key):
    """Return the string name for a pyglet key or mouse-button constant.

    Returns None for symbols this module does not expose; callers use
    that to silently drop the event.
    """
    global _symbol_dict
    import pyglet
    if _symbol_dict is None:
        # Built on first use so pyglet is only touched once a window exists.
        _symbol_dict = {
            pyglet.window.key.A: 'A',
            pyglet.window.key.B: 'B',
            pyglet.window.key.C: 'C',
            pyglet.window.key.D: 'D',
            pyglet.window.key.E: 'E',
            pyglet.window.key.F: 'F',
            pyglet.window.key.G: 'G',
            pyglet.window.key.H: 'H',
            pyglet.window.key.I: 'I',
            pyglet.window.key.J: 'J',
            pyglet.window.key.K: 'K',
            pyglet.window.key.L: 'L',
            pyglet.window.key.M: 'M',
            pyglet.window.key.N: 'N',
            pyglet.window.key.O: 'O',
            pyglet.window.key.P: 'P',
            pyglet.window.key.Q: 'Q',
            pyglet.window.key.R: 'R',
            pyglet.window.key.S: 'S',
            pyglet.window.key.T: 'T',
            pyglet.window.key.U: 'U',
            pyglet.window.key.V: 'V',
            pyglet.window.key.W: 'W',
            pyglet.window.key.X: 'X',
            pyglet.window.key.Y: 'Y',
            pyglet.window.key.Z: 'Z',
            pyglet.window.key._0: '0',
            pyglet.window.key._1: '1',
            pyglet.window.key._2: '2',
            pyglet.window.key._3: '3',
            pyglet.window.key._4: '4',
            pyglet.window.key._5: '5',
            pyglet.window.key._6: '6',
            pyglet.window.key._7: '7',
            pyglet.window.key._8: '8',
            pyglet.window.key._9: '9',
            pyglet.window.key.SPACE: 'SPACE',
            pyglet.window.key.ENTER: 'ENTER',
            pyglet.window.key.BACKSPACE: 'BACKSPACE',
            pyglet.window.key.ESCAPE: 'ESCAPE',
            pyglet.window.key.LEFT: 'LEFT',
            pyglet.window.key.RIGHT: 'RIGHT',
            pyglet.window.key.UP: 'UP',
            pyglet.window.key.DOWN: 'DOWN',
            # Mouse buttons share the same lookup table as keyboard symbols.
            # NOTE(review): this assumes pyglet's mouse-button constants do
            # not collide with any keyboard symbol listed above -- confirm.
            pyglet.window.mouse.LEFT: 'LEFT',
            pyglet.window.mouse.RIGHT: 'RIGHT',
            pyglet.window.mouse.MIDDLE: 'MIDDLE',
        }
    if key not in _symbol_dict:
        return None
    return _symbol_dict[key]
class KeyDownEvent:
    """A keyboard key was pressed.

    Fields:
    key -- String name of the pressed key, one of:
           'A'..'Z', '0'..'9',
           'SPACE', 'ENTER', 'BACKSPACE', 'ESCAPE',
           'LEFT', 'RIGHT', 'UP', 'DOWN'.
    """
    def __init__(self, key):
        self.key = key
class KeyUpEvent:
    """A keyboard key was released.

    Fields:
    key -- String name of the released key, one of:
           'A'..'Z', '0'..'9',
           'SPACE', 'ENTER', 'BACKSPACE', 'ESCAPE',
           'LEFT', 'RIGHT', 'UP', 'DOWN'.
    """
    def __init__(self, key):
        self.key = key
class TextEvent:
    """The user typed text on the keyboard.

    Fields:
    text -- The typed text as a string.
    """
    def __init__(self, text):
        self.text = text
class MouseMoveEvent:
    """The mouse moved.

    Fields:
    x, y   -- Current mouse position.
    dx, dy -- Difference from the previous position.
    """
    def __init__(self, x, y, dx, dy):
        self.x, self.y = x, y
        self.dx, self.dy = dx, dy
class MouseDownEvent:
    """A mouse button was pressed.

    Fields:
    x, y   -- Current mouse position.
    button -- 'LEFT', 'RIGHT' or 'MIDDLE'.
    """
    def __init__(self, x, y, button):
        self.x, self.y = x, y
        self.button = button
class MouseUpEvent:
    """A mouse button was released.

    Fields:
    x, y   -- Current mouse position.
    button -- 'LEFT', 'RIGHT' or 'MIDDLE'.
    """
    def __init__(self, x, y, button):
        self.x, self.y = x, y
        self.button = button
def _update_camera():
    """Rebuild the GL projection matrix from ``_ctx._camera``.

    The transform order matters: translate to the camera's screen center,
    rotate, zoom, then translate by the negated world position (GL applies
    these right-to-left to each vertex).
    """
    global _ctx
    import pyglet, math
    pyglet.gl.glViewport(0, 0, _ctx._win.width, _ctx._win.height)
    pyglet.gl.glMatrixMode(pyglet.gl.GL_PROJECTION)
    pyglet.gl.glLoadIdentity()
    # Map GL coordinates 1:1 onto window pixels, origin at the bottom-left.
    pyglet.gl.glOrtho(0, _ctx._win.width, 0, _ctx._win.height, -1, 1)
    pyglet.gl.glTranslatef(_ctx._camera.center[0], _ctx._camera.center[1], 0)
    # Camera rotation is stored in radians; glRotatef expects degrees.
    # The sign is flipped when converting.
    pyglet.gl.glRotatef(-_ctx._camera.rotation/math.pi*180, 0, 0, 1)
    pyglet.gl.glScalef(_ctx._camera.zoom, _ctx._camera.zoom, 1)
    pyglet.gl.glTranslatef(-_ctx._camera.position[0], -_ctx._camera.position[1], 0)
def open_window(title, width, height, fps=60, double_buffer=True):
    """Open a window with the specified parameters. Only one window can be open at any time.

    Raises EasyGameError if a window is already open.

    Arguments:
    title -- Text at the top of the window.
    width -- Width of the window in pixels.
    height -- Height of the window in pixels.
    fps -- Maximum number of frames per second. (Defaults to 60.)
    double_buffer -- Use False for a single-buffered window. Only use this if you are Tellegar or know what you are doing.
    """
    global _ctx
    import pyglet
    if _ctx._win is not None:
        raise EasyGameError('window already open')
    # Audio back-ends tried in order; 'silent' is the no-sound last resort.
    pyglet.options['audio'] = ('openal', 'pulse', 'directsound', 'silent')
    config = None
    if not double_buffer:
        config = pyglet.gl.Config(double_buffer = False)
    _ctx._win = pyglet.window.Window(caption=title, width=width, height=height, config=config)
    _ctx._fps = fps
    _ctx._win.switch_to()
    # Reset all per-window state left over from any previously closed window.
    _ctx._camera = _Camera((0, 0), (0, 0), 0, 1)
    _ctx._saved_cameras = []
    _ctx._channels = {}
    _ctx._fonts = {}
    # Standard alpha blending so images with transparency draw correctly.
    pyglet.gl.glBlendFunc(pyglet.gl.GL_SRC_ALPHA, pyglet.gl.GL_ONE_MINUS_SRC_ALPHA)
    pyglet.gl.glEnable(pyglet.gl.GL_BLEND)
    _update_camera()
    _ctx._win.dispatch_events()

    # The handlers below translate pyglet callbacks into this module's
    # event objects; poll_events() later drains _ctx._events. Keys and
    # buttons that _symbol_to_string does not know are dropped silently.

    @_ctx._win.event
    def on_close():
        global _ctx
        _ctx._events.append(CloseEvent())
        return pyglet.event.EVENT_HANDLED
    @_ctx._win.event
    def on_key_press(symbol, modifiers):
        global _ctx
        key = _symbol_to_string(symbol)
        if key is None:
            return
        _ctx._events.append(KeyDownEvent(key))
        return pyglet.event.EVENT_HANDLED
    @_ctx._win.event
    def on_key_release(symbol, modifiers):
        global _ctx
        key = _symbol_to_string(symbol)
        if key is None:
            return
        _ctx._events.append(KeyUpEvent(key))
        return pyglet.event.EVENT_HANDLED
    @_ctx._win.event
    def on_text(text):
        global _ctx
        _ctx._events.append(TextEvent(text))
        return pyglet.event.EVENT_HANDLED
    @_ctx._win.event
    def on_mouse_motion(x, y, dx, dy):
        global _ctx
        _ctx._events.append(MouseMoveEvent(x, y, dx, dy))
        return pyglet.event.EVENT_HANDLED
    # Dragging (moving with a button held) is reported as a plain mouse move.
    @_ctx._win.event
    def on_mouse_drag(x, y, dx, dy, buttons, modifiers):
        global _ctx
        _ctx._events.append(MouseMoveEvent(x, y, dx, dy))
        return pyglet.event.EVENT_HANDLED
    @_ctx._win.event
    def on_mouse_press(x, y, symbol, modifiers):
        global _ctx
        button = _symbol_to_string(symbol)
        if button is None:
            return
        _ctx._events.append(MouseDownEvent(x, y, button))
        return pyglet.event.EVENT_HANDLED
    @_ctx._win.event
    def on_mouse_release(x, y, symbol, modifiers):
        global _ctx
        button = _symbol_to_string(symbol)
        if button is None:
            return
        _ctx._events.append(MouseUpEvent(x, y, button))
        return pyglet.event.EVENT_HANDLED
def close_window():
    """Close the window. Raises EasyGameError if no window is open."""
    global _ctx
    win = _ctx._win
    if win is None:
        raise EasyGameError('window not open')
    win.close()
    # Mark the context closed so open_window() can be called again.
    _ctx._win = None
def poll_events():
    """Return a list of the events that happened since the previous call.

    There are 7 event types: CloseEvent, KeyDownEvent, KeyUpEvent,
    TextEvent, MouseMoveEvent, MouseDownEvent, MouseUpEvent.

    CloseEvent has no fields.

    KeyDownEvent and KeyUpEvent carry a ``key`` string, one of:
    - 'A' ... 'Z'
    - '0' ... '9'
    - 'SPACE', 'ENTER', 'BACKSPACE', 'ESCAPE'
    - 'LEFT', 'RIGHT', 'UP', 'DOWN'.

    TextEvent carries the typed text in its ``text`` field.

    Every mouse event carries the current position in ``x`` and ``y``.
    MouseMoveEvent adds ``dx``/``dy`` deltas from the previous position;
    MouseDownEvent and MouseUpEvent add a ``button`` string, one of
    'LEFT', 'RIGHT', 'MIDDLE'.

    Raises EasyGameError if no window is open.
    """
    global _ctx
    import pyglet
    if _ctx._win is None:
        raise EasyGameError('window not open')
    # dispatch_events() fires the window's handlers, which refill the
    # freshly cleared _ctx._events list.
    _ctx._events = []
    _ctx._win.dispatch_events()
    return _ctx._events.copy()
def next_frame():
    """Show the window's contents and wait until the next frame is due.

    Raises EasyGameError if no window is open.
    """
    global _ctx
    import time
    import pyglet
    if _ctx._win is None:
        raise EasyGameError('window not open')
    _ctx._win.flip()
    # Sleep away whatever is left of this frame's time budget.
    frame_budget = 1 / _ctx._fps
    elapsed = pyglet.clock.tick()
    if elapsed < frame_budget:
        time.sleep(frame_budget - elapsed)
def fill(r, g, b):
    """Clear the whole window to a single color.

    The r, g, b components should each be between 0 and 1.
    Raises EasyGameError if no window is open.
    """
    global _ctx
    import pyglet
    if _ctx._win is None:
        raise EasyGameError('window not open')
    # Set the clear color (alpha fixed at 1), then clear the frame buffer.
    pyglet.gl.glClearColor(r, g, b, 1)
    _ctx._win.clear()
class _Image:
def __init__(self, img):
import pyglet
self._img = img
self._sprite = pyglet.sprite.Sprite(img)
@property
def width(self):
return self._img.width
@property
def height(self):
return self._img.height
@property
def center(self):
return (self._img.width//2, self._img.height//2)
def load_image(path):
    """Load the image at *path* and return it.

    PNG, JPEG and many more formats are supported.

    Arguments:
    path -- Path to the image file. (For example 'images/crying_baby.png'.)
    """
    import pyglet
    raw = pyglet.resource.image(path)
    return _Image(raw)
def load_sheet(path, frame_width, frame_height):
    """Load a sprite sheet and slice it into frames of the given size.

    Returns the list of frame images, column by column (left to right),
    and bottom to top within each column.

    Arguments:
    path -- Path to the sprite sheet.
    frame_width -- Width of a single frame.
    frame_height -- Height of a single frame.
    """
    import pyglet
    sheet = pyglet.resource.image(path)
    cols = sheet.width // frame_width
    rows = sheet.height // frame_height
    return [
        sheet.get_region(col * frame_width, row * frame_height,
                         frame_width, frame_height)
        for col in range(cols)
        for row in range(rows)
    ]
def image_data(image):
"""Returns a list of RGBA values of pixels of the image.
The pixels are listed row by row.
"""
raw = image._img.get_image_data()
pitch = | |
value=0.0, tags=tags101 + ['port:eth101/1/22'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/23'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/24'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/25'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/26'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/27'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/28'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/29'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/30'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/31'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/32'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/33'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/34'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/35'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/36'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/37'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/38'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/39'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/40'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/41'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/42'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/33'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/34'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/35'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/36'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/37'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/38'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/39'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/40'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/41'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/42'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/43'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/44'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/45'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/46'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/47'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/48'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/49'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/50'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/51'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/52'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/53'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/54'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/1'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/2'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/3'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/4'], hostname=hn102)
aggregator.assert_metric(metric_name, value=1.0, tags=tags102 + ['port:eth1/15'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/16'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/17'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/18'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/5'], hostname=hn102)
aggregator.assert_metric(metric_name, value=1.0, tags=tags102 + ['port:eth1/6'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/7'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/8'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/9'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/10'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/11'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/12'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/13'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/14'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/20'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/19'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/21'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/22'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/23'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/24'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/25'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/26'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/27'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/28'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/29'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/30'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/31'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/32'], hostname=hn102)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/33'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/34'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/35'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/36'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/1'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/2'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/3'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/4'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/5'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/6'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/7'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/8'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/9'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/10'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/11'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/12'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/13'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/14'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/15'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/16'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/17'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/18'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/19'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/20'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/21'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/22'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/23'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/24'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/25'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/26'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/27'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/28'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/29'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/30'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/31'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags202 + ['port:eth1/32'], hostname=hn202)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/33'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/34'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/35'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/36'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/1'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/2'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/3'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/4'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/5'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/6'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/7'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/8'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/9'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/10'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/11'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/12'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/13'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/14'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/15'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/16'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/17'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/18'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/19'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/20'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/21'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/22'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/23'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/24'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/25'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/26'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/27'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/28'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/29'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/30'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/31'], hostname=hn201)
aggregator.assert_metric(metric_name, value=0.0, tags=tags201 + ['port:eth1/32'], hostname=hn201)
metric_name = 'cisco_aci.fabric.node.mem.free.max'
aggregator.assert_metric(metric_name, value=13903716.0, tags=tagsleaf101, hostname=hn101)
aggregator.assert_metric(metric_name, value=13975396.0, tags=tagsleaf102, hostname=hn102)
aggregator.assert_metric(metric_name, value=5531456.0, tags=tagsspine202, hostname=hn202)
aggregator.assert_metric(metric_name, value=5480244.0, tags=tagsspine201, hostname=hn201)
metric_name = 'cisco_aci.fabric.node.health.min'
aggregator.assert_metric(metric_name, value=72.0, tags=tagsleaf101, hostname=hn101)
aggregator.assert_metric(metric_name, value=72.0, tags=tagsleaf102, hostname=hn102)
aggregator.assert_metric(metric_name, value=98.0, tags=tagsspine202, hostname=hn202)
aggregator.assert_metric(metric_name, value=99.0, tags=tagsspine201, hostname=hn201)
metric_name = 'cisco_aci.fabric.port.egr_drop_pkts.errors'
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/43'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/44'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/45'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/46'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/47'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/48'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/1'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/2'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/3'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/4'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/5'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/6'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/7'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/9'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/8'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/10'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/11'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/12'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/15'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/19'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/33'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/48'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/49'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/50'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/1'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/2'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/3'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/4'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/5'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/6'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/7'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/8'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/9'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/10'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/11'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/12'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/13'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/14'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/15'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/16'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/17'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/18'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/19'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/20'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/21'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/22'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/23'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/24'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/25'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/26'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/27'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/28'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/29'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/30'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/31'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/32'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/33'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/34'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/35'], hostname=hn101)
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + | |
# Author: <NAME>
# Date: 5 Feb 2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess 3D tensors (gray-scale images, gray-scale
videos, speech/time series, text, etc).
If the example size is not fixed (e.g. images of different size), crop a region
then rescale to a fixed size with fixed height-width ratio.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import time
# tf.enable_eager_execution()
_GLOBAL_CROP_SIZE = (224,224)
_GLOBAL_NUM_FRAMES = 10
_GLOBAL_NUM_REPEAT = 4
_GLOBAL_CROP_RATIO = 0.5
_SHUFFLE_BUFFER = 1000
def _crop_3d_tensor(tensor_3d,
                    crop_size=_GLOBAL_CROP_SIZE,
                    num_frames=_GLOBAL_NUM_FRAMES,
                    num_repeat=_GLOBAL_NUM_REPEAT,
                    crop_ratio=_GLOBAL_CROP_RATIO):
    """Crop a batch of 3-D tensors to `crop_size`.

    Args:
      tensor_3d: A 3-D tensor batch of shape
        (batch_size, sequence_size, row_count, col_count)
      crop_size: A Tensor of type `int32`. A 1-D tensor of 2 elements,
        size = [crop_height, crop_width]. All cropped image patches are
        resized to this size. The aspect ratio of the image content is not
        preserved. Both crop_height and crop_width need to be positive.
      num_frames: Number of frames to keep (crop).
      num_repeat: The number of repetition of cropping cycle for each batch.
      crop_ratio: The ratio when cropping height and width.
    Returns:
      A Tensor of shape
        [num_repeat * batch_size, num_frames, crop_height, crop_width]
      where crop_size is equal to (crop_height, crop_width).
    """
    if not tensor_3d.shape.ndims == 4:
      raise ValueError("The shape of the tensor to crop should be " +
                       "[batch_size, sequence_size, row_count, col_count]!")
    batch_size, sequence_size, row_count, col_count = tensor_3d.shape
    # Crop time axis
    # pad sequence if not long enough (time axis is dim 1 of the 4-D batch)
    pad_size = tf.maximum(num_frames - tf.shape(tensor_3d)[1], 0)
    padded_tensor = tf.pad(tensor_3d, ((0,0), (0, pad_size), (0, 0), (0, 0)))
    maxval = padded_tensor.shape[1] - num_frames + 1
    # Randomly choose the beginning index of frames
    # NOTE(review): np.random.randint runs at graph-construction time, so
    # the same start frame is reused for every sess.run of this graph —
    # confirm this is intended (tf.random.uniform would resample per run).
    begin = np.random.randint(0, maxval)
    sliced_tensor = tf.slice(padded_tensor,
                             begin=[0, begin, 0, 0],
                             size=[-1, num_frames, -1, -1])
    # Crop spatial axes
    # First, transpose from [batch_size, sequence_size, row_count, col_count]
    # to [batch_size, row_count, col_count, sequence_size]
    # (crop_and_resize expects channels last; frames act as channels here).
    sliced_tensor = tf.transpose(sliced_tensor, perm=[0, 2, 3, 1])
    # sliced_tensor = tf.transpose(padded_tensor, perm=[0, 2, 3, 1])
    # Then apply `tf.image.crop_and_resize` by precompute some size info.
    # One random box per (repeat, batch element); boxes are in normalized
    # [0, 1] coordinates, each spanning a crop_ratio fraction of the image.
    y1, x1 = tf.random.uniform(shape=[2, num_repeat * batch_size],
                               minval=0,
                               maxval=1 - crop_ratio)
    y2 = y1 + crop_ratio
    # = tf.random.uniform(shape=[num_repeat * batch_size],
    #                     minval=0,
    #                     maxval=1 - crop_ratio)
    x2 = x1 + crop_ratio
    boxes = tf.transpose([y1, x1, y2, x2])
    # Map each of the num_repeat * batch_size boxes back to its batch item.
    box_ind = list(range(batch_size)) * num_repeat
    # At last, crop and resize
    resized_tensor = tf.image.crop_and_resize(sliced_tensor,
                                              boxes,
                                              box_ind,
                                              crop_size)
    # Move frames back to dim 1: [N, num_frames, crop_height, crop_width].
    return tf.transpose(resized_tensor, perm=[0, 3, 1, 2])
def crop_time_axis(tensor_3d, num_frames, begin_index=None):
    """Given a 3-D tensor, take a slice of length `num_frames` on its time axis.

    Args:
      tensor_3d: A Tensor of shape [sequence_size, row_count, col_count]
      num_frames: An integer representing the resulted chunk (sequence) length
      begin_index: The index of the beginning of the chunk. If `None`, chosen
        randomly.
    Returns:
      A Tensor of sequence length `num_frames`, which is a chunk of `tensor_3d`.
    """
    # The time axis of a [sequence, row, col] tensor is axis 0. (The original
    # read shape index 1 — the row count — when computing padding and the
    # random-start bound, even though the slice itself runs along axis 0.)
    # pad sequence if not long enough
    pad_size = tf.maximum(num_frames - tf.shape(tensor_3d)[0], 0)
    padded_tensor = tf.pad(tensor_3d, ((0, pad_size), (0, 0), (0, 0)))
    # If not given, randomly choose the beginning index of frames.
    # Compare against None explicitly so an explicit begin_index of 0 is
    # honored rather than being treated as "not provided".
    if begin_index is None:
        maxval = tf.shape(padded_tensor)[0] - num_frames + 1
        begin_index = tf.random.uniform([1],
                                        minval=0,
                                        maxval=maxval,
                                        dtype=tf.int32)[0]
    # Build the full 3-D begin vector: start on the time axis, 0 elsewhere.
    begin = tf.stack([begin_index, 0, 0], name='begin_index')
    sliced_tensor = tf.slice(padded_tensor,
                             begin=begin,
                             size=[num_frames, -1, -1])
    return sliced_tensor
def resize_space_axes(tensor_3d, new_row_count, new_col_count):
    """Resize the two spatial axes of a 3-D tensor to a target size.

    Args:
      tensor_3d: A Tensor of shape [sequence_size, row_count, col_count].
      new_row_count: An integer indicating the target row count.
      new_col_count: An integer indicating the target column count.
    Returns:
      A Tensor of shape [sequence_size, new_row_count, new_col_count].
    """
    # tf.image.resize_images expects channels last, so move the time axis to
    # the back, resize the leading two axes, then restore the original order.
    time_last = tf.transpose(tensor_3d, perm=[1, 2, 0])
    resized = tf.image.resize_images(time_last,
                                     (new_row_count, new_col_count))
    return tf.transpose(resized, perm=[2, 0, 1])
def preprocess_tensor_3d(tensor_3d,
                         input_shape=None,
                         output_shape=None):
    """Preprocess a 3-D tensor: crop its time axis, then resize its space axes.

    Args:
      tensor_3d: A Tensor of shape [sequence_size, row_count, col_count].
      input_shape: The shape [sequence_size, row_count, col_count] of the input
        examples. Non-positive components mean the dimension is unknown.
      output_shape: The shape [sequence_size, row_count, col_count] of the
        output examples. Non-positive components fall back to module defaults.
    Returns:
      A Tensor of shape [num_frames, new_row_count, new_col_count].
    """
    if input_shape:
        # Map non-positive components to None (unknown). The original computed
        # this sanitized shape but then applied the raw input_shape, so a -1
        # component would have been passed to set_shape verbatim.
        shape = [x if x > 0 else None for x in input_shape]
        tensor_3d.set_shape(shape)
    else:
        tensor_3d.set_shape([None, None, None])

    def _target_dim(index, default):
        # One output dimension, or its module-level default when unset or
        # non-positive.
        if output_shape and output_shape[index] > 0:
            return output_shape[index]
        return default

    num_frames = _target_dim(0, _GLOBAL_NUM_FRAMES)
    new_row_count = _target_dim(1, _GLOBAL_CROP_SIZE[0])
    new_col_count = _target_dim(2, _GLOBAL_CROP_SIZE[1])
    tensor_t = crop_time_axis(tensor_3d, num_frames=num_frames)
    tensor_ts = resize_space_axes(tensor_t,
                                  new_row_count=new_row_count,
                                  new_col_count=new_col_count)
    return tensor_ts
def parse_record_fn(value, is_training, dtype):
    """For a (features, labels) pair `value`, apply preprocessing.
    """
    # `value` is a tuple (matrix_bundle_0, ..., matrix_bundle_(N-1), labels);
    # only the first matrix bundle is preprocessed here.
    tensor_3d = value[0]
    labels = value[-1]
    tensor_3d_preprocessed = preprocess_tensor_3d(tensor_3d)
    print("tensor_3d_preprocessed:", tensor_3d_preprocessed) # TODO
    return tensor_3d_preprocessed, labels
def input_function(dataset,
                   is_training,
                   batch_size,
                   shuffle_buffer=_SHUFFLE_BUFFER,
                   parse_record_fn=parse_record_fn,
                   num_epochs=1,
                   dtype=tf.float32,
                   datasets_num_private_threads=None,
                   num_parallel_batches=1):
    """Given a Dataset of 3-D tensors, return an iterator over the records.

    Inspired from:
    https://github.com/tensorflow/models/blob/master/official/resnet/resnet_run_loop.py#L49
    Args:
      dataset: A Dataset representing 3-D tensors. Each example in this dataset
        has shape [sequence_size, row_count, col_count].
      is_training: A boolean denoting whether the input is for training.
      batch_size: The number of examples per batch.
      shuffle_buffer: The buffer size to use when shuffling records. A larger
        value results in better randomness, but smaller values reduce startup
        time and use less memory.
      parse_record_fn: A function that takes a raw record and returns the
        corresponding (features, labels) pair.
      num_epochs: The number of epochs to repeat the dataset. (Currently
        unused: the `repeat` call below is commented out.)
      dtype: Data type to use for images/features.
      datasets_num_private_threads: Number of threads for a private
        threadpool created for all datasets computation.
      num_parallel_batches: Number of parallel batches for tf.data.
    Returns:
      Dataset of (features, labels) pairs ready for iteration, where `features` is
      a 4-D tensor with known shape:
        [batch_size, new_sequence_size, new_row_count, new_col_count]
    """
    # Prefetches a batch at a time to smooth out the time taken to load input
    # files for shuffling and processing.
    # dataset = dataset.prefetch(buffer_size=batch_size)
    if is_training:
      # Shuffles records before repeating to respect epoch boundaries.
      dataset = dataset.shuffle(buffer_size=shuffle_buffer)
    # Repeats the dataset for the number of epochs to train.
    # dataset = dataset.repeat(num_epochs)
    # Parses the raw records into images and labels.
    # map_and_batch fuses the per-record parse with batching; records are
    # forwarded to parse_record_fn as a single tuple via *value.
    dataset = dataset.apply(
        tf.data.experimental.map_and_batch(
            lambda *value: parse_record_fn(value, is_training, dtype),
            batch_size=batch_size,
            num_parallel_batches=num_parallel_batches,
            drop_remainder=False))
    # Operations between the final prefetch and the get_next call to the iterator
    # will happen synchronously during run time. We prefetch here again to
    # background all of the above processing work and keep it out of the
    # critical training path. Setting buffer_size to tf.contrib.data.AUTOTUNE
    # allows DistributionStrategies to adjust how many batches to fetch based
    # on how many devices are present.
    # dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
    # Defines a specific size thread pool for tf.data operations.
    # NOTE(review): `threadpool` is not imported anywhere in this module, so
    # this branch raises NameError if datasets_num_private_threads is set —
    # confirm the intended import (tensorflow.contrib.data threadpool?).
    if datasets_num_private_threads:
      tf.compat.v1.logging.info('datasets_num_private_threads: %s',
                                datasets_num_private_threads)
      dataset = threadpool.override_threadpool(
          dataset,
          threadpool.PrivateThreadPool(
              datasets_num_private_threads,
              display_name='input_pipeline_thread_pool'))
    return dataset
def print_first_element(dataset):
    """Evaluate and print the first element of `dataset` (TF1 session style)."""
    iterator = dataset.make_initializable_iterator()
    next_element = iterator.get_next()
    # Dump the current default graph so it can be inspected (e.g. TensorBoard).
    writer = tf.summary.FileWriter('.')
    writer.add_graph(tf.get_default_graph())
    writer.flush()
    with tf.Session() as sess:
      sess.run(iterator.initializer)
      # NOTE(review): show_all_nodes is not defined in this module — confirm
      # where it is supposed to come from before running this helper.
      show_all_nodes() # TODO: to delete
      haha = sess.run(next_element)
      print(haha)
def test_crop():
    """Smoke-test _crop_3d_tensor on a small random 4-D batch."""
    source_shape = (3, 100, 4, 4)
    source = tf.random.uniform(source_shape)
    cropped = _crop_3d_tensor(source, (224, 224))
    print("Cropped tensor:", cropped, '\n', cropped.shape)
def test_resize_space_axes():
    """Feed a random [100, 224, 224] array through resize_space_axes."""
    placeholder = tf.placeholder(tf.float32, shape=[None, None, None])
    print("tensor_3d.shape.eval():", placeholder.shape)
    res = resize_space_axes(placeholder,
                            new_row_count=_GLOBAL_CROP_SIZE[0],
                            new_col_count=_GLOBAL_CROP_SIZE[1])
    with tf.Session() as sess:
      sample = np.random.rand(100, 224, 224)
      print(sess.run(res, feed_dict={placeholder: sample}))
      print(res.shape)
def test_crop_time_axis():
    """Smoke-test crop_time_axis and inspect the 'begin_stacked' graph op."""
    frames = tf.placeholder(tf.float32, shape=[None, None, None])
    print("tensor_3d.shape.eval():", frames.shape)
    cropped = crop_time_axis(frames, num_frames=_GLOBAL_NUM_FRAMES)
    with tf.Session() as sess:
        sample = np.random.rand(100, 224, 224)
        # Look up the intermediate op added by crop_time_axis by name.
        begin_op = tf.get_default_graph().get_tensor_by_name("begin_stacked:0")
        print(begin_op)
        print(sess.run(begin_op, feed_dict={frames: sample}))
        print(cropped.shape)
def test_tensorflow():
    """Check how a placeholder with a fully-unknown shape prints."""
    unknown = tf.placeholder(tf.float32, shape=[None, None, None])
    print("tensor_unknown:", unknown)
def test_input_fn():
"""Test for the funtion `input_fn`."""
# dataset_dir = '/Users/evariste/projects/autodl-contrib/formatted_datasets/itwas/itwas.data/train'
dataset_dir | |
"""
used for storing the created models
"""
# NOTE: at what point does split attention actually take effect? (translated from Chinese)
from __future__ import print_function, division
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torch
from torch.nn import Conv2d, Module, Linear, BatchNorm2d, ReLU
from torch.nn.modules.utils import _pair
import math
class SplAtConv2d(Module):
    """Split-Attention Conv2d (ResNeSt).

    Runs one grouped convolution producing ``radix`` feature-map splits per
    cardinal group, then re-weights those splits with attention computed
    from their pooled element-wise sum.
    """
    def __init__(self, in_channels, channels, kernel_size, stride=(1, 1), padding=(0, 0),
                 dilation=(1, 1), groups=1, bias=True,
                 radix=2, reduction_factor=4,
                 rectify=False, rectify_avg=False, norm_layer=None,
                 dropblock_prob=0.0, **kwargs):
        super(SplAtConv2d, self).__init__()
        padding = _pair(padding)
        # Rectified conv only makes sense when padding is actually applied.
        self.rectify = rectify and (padding[0] > 0 or padding[1] > 0)
        self.rectify_avg = rectify_avg
        # Bottleneck width of the attention MLP, floored at 32 channels.
        inter_channels = max(in_channels*radix//reduction_factor, 32)
        self.radix = radix
        self.cardinality = groups
        self.channels = channels
        self.dropblock_prob = dropblock_prob
        if self.rectify:
            from rfconv import RFConv2d
            self.conv = RFConv2d(in_channels, channels*radix, kernel_size, stride, padding, dilation,
                                 groups=groups*radix, bias=bias, average_mode=rectify_avg, **kwargs)
        else:
            # Single grouped conv emits all radix splits at once (channels*radix maps).
            self.conv = Conv2d(in_channels, channels*radix, kernel_size, stride, padding, dilation,
                               groups=groups*radix, bias=bias, **kwargs)
        self.use_bn = norm_layer is not None
        if self.use_bn:
            self.bn0 = norm_layer(channels*radix)
        self.relu = ReLU(inplace=True)
        # fc1/fc2 are 1x1 convs implementing the per-cardinal-group attention MLP.
        self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality)
        if self.use_bn:
            self.bn1 = norm_layer(inter_channels)
        self.fc2 = Conv2d(inter_channels, channels*radix, 1, groups=self.cardinality)
        if dropblock_prob > 0.0:
            # NOTE(review): DropBlock2D in this file raises NotImplementedError on
            # construction, so dropblock_prob > 0.0 cannot actually be used here.
            self.dropblock = DropBlock2D(dropblock_prob, 3)
        self.rsoftmax = rSoftMax(radix, groups)

    def forward(self, x):
        x = self.conv(x)
        if self.use_bn:
            x = self.bn0(x)
        if self.dropblock_prob > 0.0:
            x = self.dropblock(x)
        x = self.relu(x)
        batch, rchannel = x.shape[:2]
        if self.radix > 1:
            # NOTE(review): lexicographic comparison of torch.__version__ is fragile
            # (e.g. '1.10' < '1.5' is True); harmless here since both branches
            # produce the same integer split size.
            if torch.__version__ < '1.5':
                splited = torch.split(x, int(rchannel//self.radix), dim=1)
            else:
                splited = torch.split(x, rchannel//self.radix, dim=1)
            gap = sum(splited)  # element-wise sum over the radix splits
        else:
            gap = x
        gap = F.adaptive_avg_pool2d(gap, 1)  # global context per channel, [B, C, 1, 1]
        gap = self.fc1(gap)
        if self.use_bn:
            gap = self.bn1(gap)
        gap = self.relu(gap)
        atten = self.fc2(gap)
        # Normalise attention over the radix dimension (softmax, or sigmoid if radix == 1).
        atten = self.rsoftmax(atten).view(batch, -1, 1, 1)
        if self.radix > 1:
            if torch.__version__ < '1.5':
                attens = torch.split(atten, int(rchannel//self.radix), dim=1)
            else:
                attens = torch.split(atten, rchannel//self.radix, dim=1)
            # Attention-weighted sum of the splits.
            out = sum([att*split for (att, split) in zip(attens, splited)])
        else:
            out = atten * x
        return out.contiguous()
class rSoftMax(nn.Module):
    """Radix-wise normalisation used by split attention.

    With radix > 1 the attention logits are softmax-normalised across the
    radix groups (per cardinal group); with radix == 1 a plain sigmoid gate
    is applied instead.
    """

    def __init__(self, radix, cardinality):
        super().__init__()
        self.radix = radix
        self.cardinality = cardinality

    def forward(self, x):
        batch_size = x.size(0)
        if self.radix <= 1:
            return torch.sigmoid(x)
        # Regroup logits as [B, radix, cardinality, rest] so softmax runs
        # over the radix axis, then flatten back to [B, -1].
        grouped = x.view(batch_size, self.cardinality, self.radix, -1)
        grouped = grouped.transpose(1, 2)
        normalised = F.softmax(grouped, dim=1)
        return normalised.reshape(batch_size, -1)
"""ResNet variants"""
class DropBlock2D(object):
    """Placeholder for the DropBlock regulariser.

    Not implemented in this port: instantiating it always raises, so any
    configuration with ``dropblock_prob > 0.0`` fails fast at model build.
    """
    def __init__(self, *args, **kwargs):
        raise NotImplementedError
class GlobalAvgPool2d(nn.Module):
    """Average over the spatial dimensions, returning a [B, C] tensor."""

    def __init__(self):
        """Global average pooling over the input's spatial dimensions"""
        super(GlobalAvgPool2d, self).__init__()

    def forward(self, inputs):
        # Pool each channel down to 1x1, then drop the spatial axes.
        pooled = nn.functional.adaptive_avg_pool2d(inputs, 1)
        return torch.flatten(pooled, 1)
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 -> 3x3 -> 1x1) with ResNeSt options.

    When ``radix >= 1`` the 3x3 convolution is a split-attention conv
    (``SplAtConv2d``, which normalises internally); ``avd`` inserts an
    average-pool downsampling layer before (``avd_first``) or after the
    3x3 conv, absorbing the stride.
    """
    # pylint: disable=unused-argument
    expansion = 4  # output channels = planes * expansion
    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 radix=1, cardinality=1, bottleneck_width=64,
                 avd=False, avd_first=False, dilation=1, is_first=False,
                 rectified_conv=False, rectify_avg=False,
                 norm_layer=None, dropblock_prob=0.0, last_gamma=False):
        super(Bottleneck, self).__init__()
        group_width = int(planes * (bottleneck_width / 64.)) * cardinality
        self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False)
        self.bn1 = norm_layer(group_width)
        self.dropblock_prob = dropblock_prob
        self.radix = radix
        # Average-pool downsampling applies only where a stride would occur.
        self.avd = avd and (stride > 1 or is_first)
        self.avd_first = avd_first
        if self.avd:
            self.avd_layer = nn.AvgPool2d(3, stride, padding=1)
            stride = 1  # stride is absorbed by the avg-pool layer
        if dropblock_prob > 0.0:
            self.dropblock1 = DropBlock2D(dropblock_prob, 3)
            if radix == 1:
                self.dropblock2 = DropBlock2D(dropblock_prob, 3)
            self.dropblock3 = DropBlock2D(dropblock_prob, 3)
        if radix >= 1:
            # Split-attention conv; it applies its own normalisation, so no bn2.
            self.conv2 = SplAtConv2d(
                group_width, group_width, kernel_size=3,
                stride=stride, padding=dilation,
                dilation=dilation, groups=cardinality, bias=False,
                radix=radix, rectify=rectified_conv,
                rectify_avg=rectify_avg,
                norm_layer=norm_layer,
                dropblock_prob=dropblock_prob)
        elif rectified_conv:
            from rfconv import RFConv2d
            self.conv2 = RFConv2d(
                group_width, group_width, kernel_size=3, stride=stride,
                padding=dilation, dilation=dilation,
                groups=cardinality, bias=False,
                average_mode=rectify_avg)
            self.bn2 = norm_layer(group_width)
        else:
            self.conv2 = nn.Conv2d(
                group_width, group_width, kernel_size=3, stride=stride,
                padding=dilation, dilation=dilation,
                groups=cardinality, bias=False)
            self.bn2 = norm_layer(group_width)
        self.conv3 = nn.Conv2d(
            group_width, planes * 4, kernel_size=1, bias=False)
        self.bn3 = norm_layer(planes*4)
        if last_gamma:
            # Zero-init the last BN gamma so the block starts as identity.
            from torch.nn.init import zeros_
            zeros_(self.bn3.weight)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.dilation = dilation
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        if self.dropblock_prob > 0.0:
            out = self.dropblock1(out)
        out = self.relu(out)
        if self.avd and self.avd_first:
            out = self.avd_layer(out)
        out = self.conv2(out)
        # bn2 exists only for the non-split-attention paths (radix == 0).
        if self.radix == 0:
            out = self.bn2(out)
            if self.dropblock_prob > 0.0:
                out = self.dropblock2(out)
            out = self.relu(out)
        if self.avd and not self.avd_first:
            out = self.avd_layer(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.dropblock_prob > 0.0:
            out = self.dropblock3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet Variants
    Parameters
    ----------
    block : Block
        Class for the residual block. Options are BasicBlockV1, BottleneckV1.
    layers : list of int
        Numbers of layers in each block
    classes : int, default 1000
        Number of classification classes.
    dilated : bool, default False
        Applying dilation strategy to pretrained ResNet yielding a stride-8 model,
        typically used in Semantic Segmentation.
    norm_layer : object
        Normalization layer used in backbone network (default: :class:`mxnet.gluon.nn.BatchNorm`;
        for Synchronized Cross-GPU BatchNormalization).
    Reference:
        - He, Kaiming, et al. "Deep residual learning for image recognition." Proceedings of the IEEE conference on computer vision and pattern recognition. 2016.
        - Yu, Fisher, and Koltun, Vladimir. "Multi-scale context aggregation by dilated convolutions."
    """
    # pylint: disable=unused-variable
    def __init__(self, block, layers, radix=1, groups=1, bottleneck_width=64,
                 num_classes=1000, dilated=False, dilation=1,
                 deep_stem=False, stem_width=64, avg_down=False,
                 rectified_conv=False, rectify_avg=False,
                 avd=False, avd_first=False,
                 final_drop=0.0, dropblock_prob=0,
                 last_gamma=False, norm_layer=nn.BatchNorm2d):
        self.cardinality = groups
        self.bottleneck_width = bottleneck_width
        # ResNet-D params
        self.inplanes = stem_width*2 if deep_stem else 64
        self.avg_down = avg_down
        self.last_gamma = last_gamma
        # ResNeSt params
        self.radix = radix
        self.avd = avd
        self.avd_first = avd_first
        super(ResNet, self).__init__()
        self.rectified_conv = rectified_conv
        self.rectify_avg = rectify_avg
        if rectified_conv:
            from rfconv import RFConv2d
            conv_layer = RFConv2d
        else:
            conv_layer = nn.Conv2d
        conv_kwargs = {'average_mode': rectify_avg} if rectified_conv else {}
        if deep_stem:
            # ResNet-D deep stem: three 3x3 convs replace the single 7x7.
            self.conv1 = nn.Sequential(
                conv_layer(3, stem_width, kernel_size=3, stride=2, padding=1, bias=False, **conv_kwargs),
                norm_layer(stem_width),
                nn.ReLU(inplace=True),
                conv_layer(stem_width, stem_width, kernel_size=3, stride=1, padding=1, bias=False, **conv_kwargs),
                norm_layer(stem_width),
                nn.ReLU(inplace=True),
                conv_layer(stem_width, stem_width*2, kernel_size=3, stride=1, padding=1, bias=False, **conv_kwargs),
            )
        else:
            self.conv1 = conv_layer(3, 64, kernel_size=7, stride=2, padding=3,
                                    bias=False, **conv_kwargs)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer, is_first=False)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer)
        # Dilation replaces stride in layers 3/4 to keep higher output resolution
        # (stride-8 for dilation=4, stride-16 for dilation=2).
        if dilated or dilation == 4:
            self.layer3 = self._make_layer(block, 256, layers[2], stride=1,
                                           dilation=2, norm_layer=norm_layer,
                                           dropblock_prob=dropblock_prob)
            self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
                                           dilation=4, norm_layer=norm_layer,
                                           dropblock_prob=dropblock_prob)
        elif dilation==2:
            self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                           dilation=1, norm_layer=norm_layer,
                                           dropblock_prob=dropblock_prob)
            self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
                                           dilation=2, norm_layer=norm_layer,
                                           dropblock_prob=dropblock_prob)
        else:
            self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                           norm_layer=norm_layer,
                                           dropblock_prob=dropblock_prob)
            self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                           norm_layer=norm_layer,
                                           dropblock_prob=dropblock_prob)
        self.avgpool = GlobalAvgPool2d()
        self.drop = nn.Dropout(final_drop) if final_drop > 0.0 else None
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He (fan-out) init for convs, unit-gamma/zero-beta for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, norm_layer):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, norm_layer=None,
                    dropblock_prob=0.0, is_first=True):
        """Build one residual stage of ``blocks`` blocks; only the first block
        may stride/downsample, and ``self.inplanes`` is advanced for the rest."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            down_layers = []
            if self.avg_down:
                # ResNet-D: average-pool before the 1x1 projection instead of
                # a strided 1x1 conv (avoids discarding 3/4 of activations).
                if dilation == 1:
                    down_layers.append(nn.AvgPool2d(kernel_size=stride, stride=stride,
                                                    ceil_mode=True, count_include_pad=False))
                else:
                    down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1,
                                                    ceil_mode=True, count_include_pad=False))
                down_layers.append(nn.Conv2d(self.inplanes, planes * block.expansion,
                                             kernel_size=1, stride=1, bias=False))
            else:
                down_layers.append(nn.Conv2d(self.inplanes, planes * block.expansion,
                                             kernel_size=1, stride=stride, bias=False))
            down_layers.append(norm_layer(planes * block.expansion))
            downsample = nn.Sequential(*down_layers)
        layers = []
        # First block of the stage: dilation 1 for stage dilation 1/2, 2 for 4.
        if dilation == 1 or dilation == 2:
            layers.append(block(self.inplanes, planes, stride, downsample=downsample,
                                radix=self.radix, cardinality=self.cardinality,
                                bottleneck_width=self.bottleneck_width,
                                avd=self.avd, avd_first=self.avd_first,
                                dilation=1, is_first=is_first, rectified_conv=self.rectified_conv,
                                rectify_avg=self.rectify_avg,
                                norm_layer=norm_layer, dropblock_prob=dropblock_prob,
                                last_gamma=self.last_gamma))
        elif dilation == 4:
            layers.append(block(self.inplanes, planes, stride, downsample=downsample,
                                radix=self.radix, cardinality=self.cardinality,
                                bottleneck_width=self.bottleneck_width,
                                avd=self.avd, avd_first=self.avd_first,
                                dilation=2, is_first=is_first, rectified_conv=self.rectified_conv,
                                rectify_avg=self.rectify_avg,
                                norm_layer=norm_layer, dropblock_prob=dropblock_prob,
                                last_gamma=self.last_gamma))
        else:
            raise RuntimeError("=> unknown dilation size: {}".format(dilation))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes,
                                radix=self.radix, cardinality=self.cardinality,
                                bottleneck_width=self.bottleneck_width,
                                avd=self.avd, avd_first=self.avd_first,
                                dilation=dilation, rectified_conv=self.rectified_conv,
                                rectify_avg=self.rectify_avg,
                                norm_layer=norm_layer, dropblock_prob=dropblock_prob,
                                last_gamma=self.last_gamma))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        #x = x.view(x.size(0), -1)
        x = torch.flatten(x, 1)
        if self.drop:
            x = self.drop(x)
        x = self.fc(x)
        return x
# URL template for pretrained weights: filled with (model name, hash prefix).
_url_format = 'https://s3.us-west-1.wasabisys.com/resnest/torch/{}-{}.pth'
# Map model name -> 8-char sha256 prefix, used both to build the download URL
# and by torch.hub's check_hash verification.
_model_sha256 = {name: checksum for checksum, name in [
    ('d8fbf808', 'resnest50_fast_1s1x64d'),
    ('44938639', 'resnest50_fast_2s1x64d'),
    ('f74f3fc3', 'resnest50_fast_4s1x64d'),
    ('32830b84', 'resnest50_fast_1s2x40d'),
    ('9d126481', 'resnest50_fast_2s2x40d'),
    ('41d14ed0', 'resnest50_fast_4s2x40d'),
    ('d4a4f76f', 'resnest50_fast_1s4x24d'),
]}
def short_hash(name):
    """Return the 8-character checksum prefix for a known pretrained model.

    Raises ValueError when *name* has no registered checksum.
    """
    checksum = _model_sha256.get(name)
    if checksum is None:
        raise ValueError('Pretrained model for {name} is not available.'.format(name=name))
    return checksum[:8]
# Pre-computed download URL for every registered pretrained checkpoint.
resnest_model_urls = {name: _url_format.format(name, short_hash(name)) for
                      name in _model_sha256.keys()
                      }
def resnest50_fast_1s1x64d(pretrained=False, root='~/.encoding/models', **kwargs):
    """ResNeSt-50-fast with radix 1, cardinality 1, width 64d.

    Optionally loads hash-verified pretrained weights via torch.hub.
    """
    model = ResNet(
        Bottleneck, [3, 4, 6, 3],
        radix=1, groups=1, bottleneck_width=64,
        deep_stem=True, stem_width=32, avg_down=True,
        avd=True, avd_first=True, **kwargs)
    if pretrained:
        state_dict = torch.hub.load_state_dict_from_url(
            resnest_model_urls['resnest50_fast_1s1x64d'],
            progress=True, check_hash=True)
        model.load_state_dict(state_dict)
    return model
def resnest50_fast_2s1x64d(pretrained=False, root='~/.encoding/models', **kwargs):
    """ResNeSt-50-fast with radix 2, cardinality 1, width 64d.

    Optionally loads hash-verified pretrained weights via torch.hub.
    """
    model = ResNet(
        Bottleneck, [3, 4, 6, 3],
        radix=2, groups=1, bottleneck_width=64,
        deep_stem=True, stem_width=32, avg_down=True,
        avd=True, avd_first=True, **kwargs)
    if pretrained:
        state_dict = torch.hub.load_state_dict_from_url(
            resnest_model_urls['resnest50_fast_2s1x64d'],
            progress=True, check_hash=True)
        model.load_state_dict(state_dict)
    return model
def resnest50_fast_4s1x64d(pretrained=False, root='~/.encoding/models', **kwargs):
model = ResNet(Bottleneck, [3, 4, 6, 3],
radix=4, | |
<filename>inventory/inventory/doctype/packing_list_receipt/packing_list_receipt.py<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Myme and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import msgprint
import frappe.utils
from frappe.utils import cstr, flt, getdate, comma_and, cint
from frappe import _
from frappe.model.mapper import get_mapped_doc
import operator
form_grid_templates = {
"packing_list_data": "templates/includes/item_grid_packing_list.html",
"summary_purchase_order": "templates/includes/tabel_purchase_order.html"
}
class PackingListReceipt(Document):
    """Packing List Receipt controller.

    Collects fabric rolls (yard/meter based) into the ``packing_list_data``
    child table and piece goods into ``packing_list_data_pcs``. When
    ``is_return`` is set, roll quantities are stored as negatives so the
    receipt acts as a return document.
    """
    def add_item(self):
        """Add the currently entered roll item into ``packing_list_data``.

        Merges quantities into an existing row when item variant, roll
        length, warehouse, colour, UOM (and group, if any) all match;
        otherwise appends a new row. Roll/length totals are negated for
        returns. Entry fields are reset afterwards.
        """
        if self.is_return == 1 :
            count = 0
            if self.item_code_variant_depan and self.yard_atau_meter and self.colour and self.warehouse :
                master_item = frappe.get_doc("Item", self.item_code_variant_depan)
                parent_item = master_item.variant_of
                item_name = master_item.item_name
                if self.packing_list_data :
                    # First pass: detect whether a matching row already exists.
                    for i in self.packing_list_data :
                        if self.group_prefix and self.group_code :
                            if i.item_code_variant == self.item_code_variant_depan and i.yard_atau_meter_per_roll == self.yard_atau_meter and i.warehouse == self.warehouse and i.colour == self.colour and i.group == (self.group_prefix+"."+self.group_code) and i.inventory_uom == self.inventory_uom :
                                count = 1
                        else :
                            if i.item_code_variant == self.item_code_variant_depan and i.yard_atau_meter_per_roll == self.yard_atau_meter and i.warehouse == self.warehouse and i.colour == self.colour and i.inventory_uom == self.inventory_uom and self.group_code == "" :
                                count = 1
                    if count == 1 :
                        # Merge the negated quantities into the matching row.
                        for i in self.packing_list_data :
                            if self.group_prefix and self.group_code :
                                if i.item_code_variant == self.item_code_variant_depan and i.yard_atau_meter_per_roll == self.yard_atau_meter and i.warehouse == self.warehouse and i.colour == self.colour and i.group == (self.group_prefix+"."+self.group_code) and i.inventory_uom == self.inventory_uom :
                                    new_total_yard_atau_meter = i.total_yard_atau_meter
                                    new_total_roll = i.total_roll
                                    i.total_roll = new_total_roll + (self.qty_roll * -1)
                                    i.total_yard_atau_meter = new_total_yard_atau_meter + (self.yard_atau_meter * self.qty_roll * -1)
                            else :
                                if i.item_code_variant == self.item_code_variant_depan and i.yard_atau_meter_per_roll == self.yard_atau_meter and i.warehouse == self.warehouse and i.colour == self.colour and i.inventory_uom == self.inventory_uom and self.group_code == "" :
                                    new_total_yard_atau_meter = i.total_yard_atau_meter
                                    new_total_roll = i.total_roll
                                    i.total_roll = new_total_roll + (self.qty_roll * -1)
                                    i.total_yard_atau_meter = new_total_yard_atau_meter + (self.yard_atau_meter * self.qty_roll * -1)
                    else :
                        # No matching row: append a new (negated) row.
                        if self.group_prefix and self.group_code :
                            pp_so = self.append('packing_list_data', {})
                            pp_so.item_code_variant = self.item_code_variant_depan
                            pp_so.yard_atau_meter_per_roll = self.yard_atau_meter
                            pp_so.total_yard_atau_meter = (self.yard_atau_meter * self.qty_roll * -1)
                            pp_so.total_roll = self.qty_roll * -1
                            pp_so.group = self.group_prefix+"."+self.group_code
                            pp_so.parent_item = parent_item
                            pp_so.item_name = item_name
                            pp_so.warehouse = self.warehouse
                            pp_so.colour = self.colour
                            pp_so.inventory_uom = self.inventory_uom
                            pp_so.keterangan_group = self.keterangan_group
                        else :
                            pp_so = self.append('packing_list_data', {})
                            pp_so.item_code_variant = self.item_code_variant_depan
                            pp_so.yard_atau_meter_per_roll = self.yard_atau_meter
                            pp_so.total_yard_atau_meter = (self.yard_atau_meter * self.qty_roll * -1)
                            pp_so.total_roll = self.qty_roll * -1
                            pp_so.parent_item = parent_item
                            pp_so.item_name = item_name
                            pp_so.warehouse = self.warehouse
                            pp_so.colour = self.colour
                            pp_so.inventory_uom = self.inventory_uom
                else :
                    # Table is empty: append the first (negated) row.
                    if self.group_prefix and self.group_code :
                        pp_so = self.append('packing_list_data', {})
                        pp_so.item_code_variant = self.item_code_variant_depan
                        pp_so.yard_atau_meter_per_roll = self.yard_atau_meter
                        pp_so.total_yard_atau_meter = (self.yard_atau_meter * self.qty_roll * -1)
                        pp_so.total_roll = self.qty_roll * -1
                        pp_so.group = self.group_prefix+"."+self.group_code
                        pp_so.parent_item = parent_item
                        pp_so.item_name = item_name
                        pp_so.warehouse = self.warehouse
                        pp_so.colour = self.colour
                        pp_so.inventory_uom = self.inventory_uom
                        pp_so.keterangan_group = self.keterangan_group
                    else :
                        pp_so = self.append('packing_list_data', {})
                        pp_so.item_code_variant = self.item_code_variant_depan
                        pp_so.yard_atau_meter_per_roll = self.yard_atau_meter
                        pp_so.total_yard_atau_meter = (self.yard_atau_meter * self.qty_roll * -1)
                        pp_so.total_roll = self.qty_roll * -1
                        pp_so.parent_item = parent_item
                        pp_so.item_name = item_name
                        pp_so.warehouse = self.warehouse
                        pp_so.colour = self.colour
                        pp_so.inventory_uom = self.inventory_uom
                # Reset the entry fields for the next input.
                self.yard_atau_meter = 0
                self.qty_roll = 1
                self.colour = ""
            else :
                frappe.throw("Item Code / Colour / Warehouse / Yard / Meter tidak terisi")
        else :
            count = 0
            if self.item_code_variant_depan and self.yard_atau_meter and self.colour and self.warehouse :
                count = 0
                master_item = frappe.get_doc("Item", self.item_code_variant_depan)
                parent_item = master_item.variant_of
                item_name = master_item.item_name
                if self.packing_list_data :
                    # First pass: detect whether a matching row already exists.
                    # Rows that belong to a group are skipped when adding an
                    # ungrouped entry.
                    if self.group_prefix and self.group_code :
                        for i in self.packing_list_data :
                            if i.item_code_variant == self.item_code_variant_depan and i.yard_atau_meter_per_roll == self.yard_atau_meter and i.warehouse == self.warehouse and i.colour == self.colour and i.group == (self.group_prefix+"."+self.group_code) and i.inventory_uom == self.inventory_uom :
                                count = 1
                    else :
                        t = 0
                        for i in self.packing_list_data :
                            if i.group :
                                t = 0
                            else :
                                if i.item_code_variant == self.item_code_variant_depan and i.yard_atau_meter_per_roll == self.yard_atau_meter and i.warehouse == self.warehouse and i.colour == self.colour and i.inventory_uom == self.inventory_uom :
                                    count = 1
                    if count == 1 :
                        # Merge the quantities into the matching row.
                        if self.group_prefix and self.group_code :
                            for i in self.packing_list_data :
                                if i.item_code_variant == self.item_code_variant_depan and i.yard_atau_meter_per_roll == self.yard_atau_meter and i.warehouse == self.warehouse and i.colour == self.colour and i.group == (self.group_prefix+"."+self.group_code) and i.inventory_uom == self.inventory_uom :
                                    new_total_yard_atau_meter = i.total_yard_atau_meter
                                    new_total_roll = i.total_roll
                                    i.total_roll = new_total_roll + self.qty_roll
                                    i.total_yard_atau_meter = new_total_yard_atau_meter + (self.yard_atau_meter * self.qty_roll)
                        else :
                            t = 0
                            for i in self.packing_list_data :
                                if i.group :
                                    t = 0
                                else :
                                    if i.item_code_variant == self.item_code_variant_depan and i.yard_atau_meter_per_roll == self.yard_atau_meter and i.warehouse == self.warehouse and i.colour == self.colour and i.inventory_uom == self.inventory_uom :
                                        new_total_yard_atau_meter = i.total_yard_atau_meter
                                        new_total_roll = i.total_roll
                                        i.total_roll = new_total_roll + self.qty_roll
                                        i.total_yard_atau_meter = new_total_yard_atau_meter + (self.yard_atau_meter * self.qty_roll)
                    else :
                        # No matching row: append a new row.
                        if self.group_prefix and self.group_code :
                            pp_so = self.append('packing_list_data', {})
                            pp_so.item_code_variant = self.item_code_variant_depan
                            pp_so.yard_atau_meter_per_roll = self.yard_atau_meter
                            pp_so.total_yard_atau_meter = (self.yard_atau_meter * self.qty_roll)
                            pp_so.total_roll = self.qty_roll
                            pp_so.group = self.group_prefix+"."+self.group_code
                            pp_so.parent_item = parent_item
                            pp_so.item_name = item_name
                            pp_so.warehouse = self.warehouse
                            pp_so.colour = self.colour
                            pp_so.inventory_uom = self.inventory_uom
                            pp_so.keterangan_group = self.keterangan_group
                        else :
                            pp_so = self.append('packing_list_data', {})
                            pp_so.item_code_variant = self.item_code_variant_depan
                            pp_so.yard_atau_meter_per_roll = self.yard_atau_meter
                            pp_so.total_yard_atau_meter = (self.yard_atau_meter * self.qty_roll)
                            pp_so.total_roll = self.qty_roll
                            pp_so.parent_item = parent_item
                            pp_so.item_name = item_name
                            pp_so.warehouse = self.warehouse
                            pp_so.colour = self.colour
                            pp_so.inventory_uom = self.inventory_uom
                else :
                    # Table is empty: append the first row.
                    if self.group_prefix and self.group_code :
                        pp_so = self.append('packing_list_data', {})
                        pp_so.item_code_variant = self.item_code_variant_depan
                        pp_so.yard_atau_meter_per_roll = self.yard_atau_meter
                        pp_so.total_yard_atau_meter = (self.yard_atau_meter * self.qty_roll)
                        pp_so.total_roll = self.qty_roll
                        pp_so.group = self.group_prefix+"."+self.group_code
                        pp_so.parent_item = parent_item
                        pp_so.item_name = item_name
                        pp_so.warehouse = self.warehouse
                        pp_so.colour = self.colour
                        pp_so.inventory_uom = self.inventory_uom
                        pp_so.keterangan_group = self.keterangan_group
                    else :
                        pp_so = self.append('packing_list_data', {})
                        pp_so.item_code_variant = self.item_code_variant_depan
                        pp_so.yard_atau_meter_per_roll = self.yard_atau_meter
                        pp_so.total_yard_atau_meter = (self.yard_atau_meter * self.qty_roll)
                        pp_so.total_roll = self.qty_roll
                        pp_so.parent_item = parent_item
                        pp_so.item_name = item_name
                        pp_so.warehouse = self.warehouse
                        pp_so.colour = self.colour
                        pp_so.inventory_uom = self.inventory_uom
                # Reset the entry fields for the next input.
                self.yard_atau_meter = 0
                self.qty_roll = 1
                self.colour = ""
            else :
                frappe.throw("Item Code / Colour / Warehouse / Yard / Meter tidak terisi")

    def add_pcs(self):
        """Add the currently entered piece-goods item into
        ``packing_list_data_pcs``, merging by item code + warehouse.

        NOTE(review): unlike add_item, the return branch multiplies the
        quantity by +1 rather than -1, so return quantities are NOT negated
        here — possibly a bug; confirm intended behaviour. The entry reset is
        also asymmetric (0 for returns vs 1 otherwise).
        """
        if self.is_return :
            count = 0
            if self.item_code_pcs and self.warehouse_pcs :
                parent_item = frappe.get_doc("Item", self.item_code_pcs).variant_of
                item_name = frappe.get_doc("Item", self.item_code_pcs).item_name
                if self.packing_list_data_pcs :
                    # First pass: detect whether a matching row already exists.
                    for i in self.packing_list_data_pcs :
                        if i.item_code_pcs == self.item_code_pcs and i.warehouse_pcs == self.warehouse_pcs :
                            count = 1
                    if count == 1 :
                        for i in self.packing_list_data_pcs :
                            if i.item_code_pcs == self.item_code_pcs and i.warehouse_pcs == self.warehouse_pcs :
                                new_total_pcs = i.total_pcs
                                i.total_pcs = new_total_pcs + (self.qty_pcs *1)
                    else :
                        pp_so = self.append('packing_list_data_pcs', {})
                        pp_so.item_code_pcs = self.item_code_pcs
                        pp_so.total_pcs = self.qty_pcs *1
                        pp_so.parent_item_pcs = parent_item
                        pp_so.item_name_pcs = item_name
                        pp_so.warehouse_pcs = self.warehouse_pcs
                        pp_so.uom_pcs = self.uom_pcs
                else :
                    pp_so = self.append('packing_list_data_pcs', {})
                    pp_so.item_code_pcs = self.item_code_pcs
                    pp_so.total_pcs = self.qty_pcs *1
                    pp_so.parent_item_pcs = parent_item
                    pp_so.item_name_pcs = item_name
                    pp_so.warehouse_pcs = self.warehouse_pcs
                    pp_so.uom_pcs = self.uom_pcs
                self.qty_pcs = 0
            else :
                frappe.throw("Item Code / Warehouse tidak terisi")
        else :
            count = 0
            if self.item_code_pcs and self.warehouse_pcs :
                parent_item = frappe.get_doc("Item", self.item_code_pcs).variant_of
                item_name = frappe.get_doc("Item", self.item_code_pcs).item_name
                if self.packing_list_data_pcs :
                    # First pass: detect whether a matching row already exists.
                    for i in self.packing_list_data_pcs :
                        if i.item_code_pcs == self.item_code_pcs and i.warehouse_pcs == self.warehouse_pcs :
                            count = 1
                    if count == 1 :
                        for i in self.packing_list_data_pcs :
                            if i.item_code_pcs == self.item_code_pcs and i.warehouse_pcs == self.warehouse_pcs :
                                new_total_pcs = i.total_pcs
                                i.total_pcs = new_total_pcs + self.qty_pcs
                    else :
                        pp_so = self.append('packing_list_data_pcs', {})
                        pp_so.item_code_pcs = self.item_code_pcs
                        pp_so.total_pcs = self.qty_pcs
                        pp_so.parent_item_pcs = parent_item
                        pp_so.item_name_pcs = item_name
                        pp_so.warehouse_pcs = self.warehouse_pcs
                        pp_so.uom_pcs = self.uom_pcs
                else :
                    pp_so = self.append('packing_list_data_pcs', {})
                    pp_so.item_code_pcs = self.item_code_pcs
                    pp_so.total_pcs = self.qty_pcs
                    pp_so.parent_item_pcs = parent_item
                    pp_so.item_name_pcs = item_name
                    pp_so.warehouse_pcs = self.warehouse_pcs
                    pp_so.uom_pcs = self.uom_pcs
                self.qty_pcs = 1
            else :
                frappe.throw("Item Code / Warehouse tidak terisi")
@frappe.whitelist()
def submit_packing_list_receipt(doc,method):
    """On-submit hook: materialise 'Group Item' documents for every group
    referenced by this receipt's roll rows, and attach the rows as
    'Data Group' children of the corresponding Group Item.

    NOTE(review): the SQL below is built with str.format instead of
    parameterised queries — a SQL-injection risk if group codes / UOMs can
    contain quotes; prefer frappe.db.sql(..., values) placeholders.
    """
    # packing list data yard/meter
    if doc.packing_list_data :
        # Collect [group, uom, keterangan] triples for each distinct group
        # (consecutive duplicates are skipped; the DB check below dedupes the rest).
        temp_gruop = []
        g = ""
        c = 0
        panjang = len(doc.packing_list_data)
        for data in doc.packing_list_data :
            if data.group :
                if data.group == g :
                    g = data.group
                    c = c + 1
                else :
                    temp_gruop.append([data.group, data.inventory_uom, data.keterangan_group])
                    g = data.group
                    c = c + 1
                # NOTE(review): this re-appends the last group once all rows are
                # grouped; downstream existence checks make it harmless — confirm.
                if panjang == c :
                    temp_gruop.append([data.group, data.inventory_uom, data.keterangan_group])
        # Create any Group Item that does not exist yet.
        for t in temp_gruop :
            meow = 0
            cek_group = frappe.db.sql("""
                SELECT
                    mi.`group_code`
                FROM `tabGroup Item` mi
                WHERE mi.`group_code` = "{}"
            """.format(t[0]))
            if cek_group :
                meow = 0
            else :
                mi = frappe.new_doc("Group Item")
                mi.update({
                    "group_code": t[0],
                    "group_name": t[0],
                    "uom" : t[1],
                    "keterangan_group" : t[2],
                    "is_active": 1,
                    "packing_list_receipt" : doc.name
                })
                mi.flags.ignore_permissions = 1
                mi.save()
        # Attach each grouped row to its Group Item as a Data Group child,
        # unless an identical child already exists.
        count = 0
        skip = 0
        temp_group = []
        temp_name_group = ""
        for data in doc.packing_list_data :
            if data.group :
                if data.group == temp_name_group :
                    skip = 0
                    temp_name_group = data.group
                else :
                    temp_group.append(data.group)
                    temp_name_group = data.group
                cek_group = frappe.db.sql("""
                    SELECT
                        mi.`group_code`
                    FROM `tabGroup Item` mi
                    WHERE mi.`group_code` = "{}"
                    AND mi.`uom` = "{}"
                """.format(data.group, data.inventory_uom))
                if cek_group :
                    cek_data = frappe.db.sql("""
                        SELECT
                            di.`item_code_variant`
                        FROM `tabData Group` di
                        WHERE di.`item_code_variant` = "{}"
                        and di.`yard_atau_meter` = "{}"
                        and di.`colour` = "{}"
                        and di.`parent` = "{}"
                    """.format(data.item_code_variant, data.yard_atau_meter_per_roll, data.colour, data.group))
                    if cek_data :
                        count = 0
                    else :
                        mi = frappe.get_doc("Group Item", data.group)
                        mi.append("data_group", {
                            "doctype": "Data Group",
                            "item_code_variant" : data.item_code_variant,
                            "colour" : data.colour,
                            "yard_atau_meter" : data.yard_atau_meter_per_roll,
                            "parent_item" : data.parent_item,
                            "item_name" : data.item_name,
                            "warehouse" : data.warehouse,
                            "inventory_uom" : data.inventory_uom,
                            "total_qty_meter_atau_yard" : data.total_yard_atau_meter,
                            "total_qty_roll" : data.total_roll,
                            "packing_list_receipt" : doc.name
                        })
                        mi.flags.ignore_permissions = 1
                        mi.save()
# for i in temp_gruop :
# mi = frappe.get_doc("Group Item", i[0])
# mi.flags.ignore_permissions = 1
# mi.submit()
# for data in doc.packing_list_data :
# if data.group :
# if temp_group == data.group :
# temp_group = data.group
# else :
# temp_group = data.group
# mi = frappe.get_doc("Group Item", temp_group)
# mi.flags.ignore_permissions = 1
# mi.submit()
# else :
# mi = frappe.new_doc("Group Item")
# mi.update({
# "group_code": data.group,
# "group_name": data.group,
# "is_active": 1
# })
# item = frappe.get_doc("Item", data.parent_item)
# mi.append("data_group", {
# "doctype": "Data Group",
# "item_code_variant" : data.item_code_variant,
# "colour" : data.colour,
# "yard_atau_meter_per_roll" : data.yard_atau_meter_per_roll,
# "parent_item" : data.parent_item,
# "item_name" : data.item_name,
# "warehouse" : data.warehouse
# })
# mi.flags.ignore_permissions = 1
# mi.save()
# packing list data pcs
# if doc.packing_list_data_pcs :
# temp_gruop = []
# g = ""
# c = 0
# panjang = len(doc.packing_list_data_pcs)
# for data in doc.packing_list_data_pcs :
# if data.group_pcs == g :
# g = data.group_pcs
# c = c + 1
# else :
# temp_gruop.append(data.group_pcs)
# g = data.group_pcs
# c = c + 1
# if panjang == c :
# temp_gruop.append(data.group_pcs)
# for t in temp_gruop :
# meow = 0
# cek_group = frappe.db.sql("""
# SELECT
# mi.`group_code`
# FROM `tabGroup Item` mi
# WHERE mi.`group_code` = "{}"
# """.format(t[0]))
# if cek_group :
# meow = 0
# else :
# mi = frappe.new_doc("Group Item")
# mi.update({
# "group_code": t[0],
# "group_name": t[0],
# "uom" : t[1],
# "is_active": 1
# })
# mi.flags.ignore_permissions = 1
# mi.save()
# count = 0
# for data in doc.packing_list_data_pcs :
# if data.group_pcs :
# cek_group = frappe.db.sql("""
# SELECT
# mi.`group_code`
# FROM `tabGroup Item` mi
# WHERE mi.`group_code` = "{}"
# AND mi.`uom` = "{}"
# """.format(data.group_pcs, data.uom_pcs))
# if cek_group :
# cek_data = | |
<filename>check_invesalius_coord_coil.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# xcoord - Tools for cross-software spatial coordinate manipulation
#
# This file is part of xcoord package which is released under copyright.
# See file LICENSE or go to website for full license details.
# Copyright (C) 2018 <NAME> - All Rights Reserved
#
# Homepage: https://github.com/vhosouza/xcoord
# Contact: <EMAIL>
# License: MIT License
#
# Authors: <NAME>
# Date/version: 10.4.2019
import os
import nibabel as nb
import numpy as np
import transformations as tf
import vtk
def main():
SHOW_AXES = True
SHOW_SCENE_AXES = True
SHOW_COIL_AXES = True
SHOW_SKIN = True
SHOW_BRAIN = True
SHOW_COIL = True
SHOW_MARKERS = True
TRANSF_COIL = True
SHOW_PLANE = False
SELECT_LANDMARKS = 'scalp' # 'all', 'mri' 'scalp'
SAVE_ID = True
AFFINE_IMG = True
NO_SCALE = True
SCREENSHOT = False
SHOW_OTHER = False
reorder = [0, 2, 1]
flipx = [True, False, False]
# reorder = [0, 1, 2]
# flipx = [False, False, False]
# default folder and subject
# for Bert image use the translation in the base_affine (fall-back)
subj_list = ['VictorSouza', 'JaakkoNieminen', 'AinoTervo',
'JuusoKorhonen', 'BaranAydogan', 'AR', 'Bert']
subj = 0
data_dir = os.environ.get('OneDrive') + r'\vh\eventos\sf 2019\mri_science_factory\{}'.format(subj_list[subj])
# filenames
img_file = data_dir + r'\{}.nii'.format(subj_list[subj])
brain_file = data_dir + r'\gm.stl'
skin_file = data_dir + r'\gm_sn.stl'
if subj == 3:
other_file = data_dir + r'\gm.ply'
elif subj == 4:
other_file = data_dir + r'\tracks.vtp'
elif subj == 6:
other_file = data_dir + r'\gm.ply'
else:
other_file = data_dir + r'\gm.stl'
# coords = lc.load_nexstim(coord_file)
# red, green, blue, maroon (dark red),
# olive (shitty green), teal (petrol blue), yellow, orange
col = [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [1., .0, 1.],
[.5, .5, 0.], [0., .5, .5], [1., 1., 0.], [1., .4, .0]]
# extract image header shape and affine transformation from original nifti file
imagedata = nb.squeeze_image(nb.load(img_file))
imagedata = nb.as_closest_canonical(imagedata)
imagedata.update_header()
pix_dim = imagedata.header.get_zooms()
img_shape = imagedata.header.get_data_shape()
print("Pixel size: \n")
print(pix_dim)
print("\nImage shape: \n")
print(img_shape)
print("\nSform: \n")
print(imagedata.get_qform(coded=True))
print("\nQform: \n")
print(imagedata.get_sform(coded=True))
print("\nFall-back: \n")
print(imagedata.header.get_base_affine())
scale_back, shear_back, angs_back, trans_back, persp_back = tf.decompose_matrix(imagedata.header.get_base_affine())
if AFFINE_IMG:
affine = imagedata.affine
# affine = imagedata.header.get_base_affine()
if NO_SCALE:
scale, shear, angs, trans, persp = tf.decompose_matrix(affine)
affine = tf.compose_matrix(scale=None, shear=shear, angles=angs, translate=trans, perspective=persp)
else:
affine = np.identity(4)
# affine_I = np.identity(4)
# create a camera, render window and renderer
camera = vtk.vtkCamera()
camera.SetPosition(0, 1000, 0)
camera.SetFocalPoint(0, 0, 0)
camera.SetViewUp(0, 0, 1)
camera.ComputeViewPlaneNormal()
camera.Azimuth(90.0)
camera.Elevation(10.0)
ren = vtk.vtkRenderer()
ren.SetActiveCamera(camera)
ren.ResetCamera()
ren.SetUseDepthPeeling(1)
ren.SetOcclusionRatio(0.1)
ren.SetMaximumNumberOfPeels(100)
camera.Dolly(1.5)
ren_win = vtk.vtkRenderWindow()
ren_win.AddRenderer(ren)
ren_win.SetSize(800, 800)
ren_win.SetMultiSamples(0)
ren_win.SetAlphaBitPlanes(1)
# create a renderwindowinteractor
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(ren_win)
# if SELECT_LANDMARKS == 'mri':
# # MRI landmarks
# coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'], ['Coil Loc'], ['EF max']]
# pts_ref = [1, 2, 3, 7, 10]
# elif SELECT_LANDMARKS == 'all':
# # all coords
# coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'], ['Nose/Nasion'], ['Left ear'], ['Right ear'],
# ['Coil Loc'], ['EF max']]
# pts_ref = [1, 2, 3, 5, 4, 6, 7, 10]
# elif SELECT_LANDMARKS == 'scalp':
# # scalp landmarks
# coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'], ['Coil Loc'], ['EF max']]
# hdr_mri = ['Nose/Nasion', 'Left ear', 'Right ear', 'Coil Loc', 'EF max']
# pts_ref = [5, 4, 6, 7, 10]
#
# coords_np = np.zeros([len(pts_ref), 3])
# for n, pts_id in enumerate(pts_ref):
# # to keep in the MRI space use the identity as the affine
# # coord_aux = n2m.coord_change(coords[pts_id][1:], img_shape, affine_I, flipx, reorder)
# # affine_trans = affine_I.copy()
# # affine_trans = affine.copy()
# # affine_trans[:3, -1] = affine[:3, -1]
# coord_aux = n2m.coord_change(coords[pts_id][1:], img_shape, affine, flipx, reorder)
# coords_np[n, :] = coord_aux
# [coord_mri[n].append(s) for s in coord_aux]
# if SHOW_MARKERS:
# marker_actor = add_marker(coord_aux, ren, col[n])
#
# print('\nOriginal coordinates from Nexstim: \n')
# [print(s) for s in coords]
# print('\nTransformed coordinates to MRI space: \n')
# [print(s) for s in coord_mri]
#
# # coil location, normal vector and direction vector
# coil_loc = coord_mri[-2][1:]
# coil_norm = coords[8][1:]
# coil_dir = coords[9][1:]
#
# # creating the coil coordinate system by adding a point in the direction of each given coil vector
# # the additional vector is just the cross product from coil direction and coil normal vectors
# # origin of the coordinate system is the coil location given by Nexstim
# # the vec_length is to allow line creation with visible length in VTK scene
# vec_length = 75
# p1 = coords[7][1:]
# p2 = [x + vec_length * y for x, y in zip(p1, coil_norm)]
# p2_norm = n2m.coord_change(p2, img_shape, affine, flipx, reorder)
#
# p2 = [x + vec_length * y for x, y in zip(p1, coil_dir)]
# p2_dir = n2m.coord_change(p2, img_shape, affine, flipx, reorder)
#
# coil_face = np.cross(coil_norm, coil_dir)
# p2 = [x - vec_length * y for x, y in zip(p1, coil_face.tolist())]
# p2_face = n2m.coord_change(p2, img_shape, affine, flipx, reorder)
# Coil face unit vector (X)
# u1 = np.asarray(p2_face) - np.asarray(coil_loc)
# u1_n = u1 / np.linalg.norm(u1)
# # Coil direction unit vector (Y)
# u2 = np.asarray(p2_dir) - np.asarray(coil_loc)
# u2_n = u2 / np.linalg.norm(u2)
# # Coil normal unit vector (Z)
# u3 = np.asarray(p2_norm) - np.asarray(coil_loc)
# u3_n = u3 / np.linalg.norm(u3)
#
# transf_matrix = np.identity(4)
# if TRANSF_COIL:
# transf_matrix[:3, 0] = u1_n
# transf_matrix[:3, 1] = u2_n
# transf_matrix[:3, 2] = u3_n
# transf_matrix[:3, 3] = coil_loc[:]
# the absolute value of the determinant indicates the scaling factor
# the sign of the determinant indicates how it affects the orientation: if positive maintain the
# original orientation and if negative inverts all the orientations (flip the object inside-out)'
# the negative determinant is what makes objects in VTK scene to become black
# print('Transformation matrix: \n', transf_matrix, '\n')
# print('Determinant: ', np.linalg.det(transf_matrix))
# if SAVE_ID:
# coord_dict = {'m_affine': transf_matrix, 'coords_labels': hdr_mri, 'coords': coords_np}
# io.savemat(output_file + '.mat', coord_dict)
# hdr_names = ';'.join(['m' + str(i) + str(j) for i in range(1, 5) for j in range(1, 5)])
# np.savetxt(output_file + '.txt', transf_matrix.reshape([1, 16]), delimiter=';', header=hdr_names)
if SHOW_BRAIN:
# brain_actor = load_stl(brain_file, ren, colour=[0., 1., 1.], opacity=0.7, user_matrix=np.linalg.inv(affine))
affine_orig = np.identity(4)
# affine_orig = affine.copy()
# affine_orig[0, 3] = affine_orig[0, 3] + pix_dim[0]*img_shape[0]
# affine_orig[1, 3] = affine_orig[1, 3] + pix_dim[1]*img_shape[1]
# affine_orig[0, 3] = affine_orig[0, 3] + pix_dim[0]*img_shape[0]
# affine_orig[0, 3] = affine_orig[0, 3] - 5
# this partially works for DTI Baran
# modified close to correct [-75.99139404 123.88291931 - 148.19839478]
# fall-back [87.50042766 - 127.5 - 127.5]
# affine_orig[0, 3] = -trans_back[0]
# affine_orig[1, 3] = -trans_back[1]
# this works for the bert image
# affine_orig[0, 3] = -127
# affine_orig[1, 3] = 127
# affine_orig[2, 3] = -127
# affine_orig[:3, :3] = affine[:3, :3]
# affine_orig[1, 3] = -affine_orig[1, 3]+27.5 # victorsouza
# affine_orig[1, 3] = -affine_orig[1, 3]+97.5
# affine_orig[1, 3] = -affine_orig[1, 3]
print('Affine original: \n', affine)
scale, shear, angs, trans, persp = tf.decompose_matrix(affine)
print('Angles: \n', np.rad2deg(angs))
print('Translation: \n', trans)
print('Affine modified: \n', affine_orig)
scale, shear, angs, trans, persp = tf.decompose_matrix(affine_orig)
print('Angles: \n', np.rad2deg(angs))
print('Translation: \n', trans)
# colour=[0., 1., 1.],
brain_actor, brain_mesh = load_stl(brain_file, ren, replace=True, colour=[1., 0., 0.],
opacity=.3, user_matrix=affine_orig)
# print('Actor origin: \n', brain_actor.GetPosition())
if SHOW_SKIN:
# skin_actor = load_stl(skin_file, ren, opacity=0.5, user_matrix=np.linalg.inv(affine))
# affine[0, 3] = affine[0, 3] + pix_dim[0] * img_shape[0]
# this is working
# affine[0, 3] = affine[0, 3] + 8.
affine[1, 3] = affine[1, 3] + pix_dim[1] * img_shape[1]
# affine[2, 3] = affine[2, 3] + pix_dim[2] * img_shape[2]
affine_inv = np.linalg.inv(affine)
# affine_inv[:3, 3] = -affine[:3, 3]
# affine_inv[2, 3] = -affine_inv[2, 3]
skin_actor, skin_mesh = load_stl(skin_file, ren, colour="SkinColor", opacity=1., user_matrix=affine_inv)
# skin_actor, skin_mesh = load_stl(skin_file, ren, colour="SkinColor", opacity=1.)
skino_actor, skino_mesh = load_stl(skin_file, ren, colour=[1., 0., 0.], opacity=1.)
if SHOW_OTHER:
# skin_actor = load_stl(skin_file, ren, opacity=0.5, user_matrix=np.linalg.inv(affine))
affine[1, 3] = affine[1, 3] + pix_dim[1] * img_shape[1]
affine_inv = np.linalg.inv(affine)
# affine_inv[:3, 3] = -affine[:3, 3]
affine_inv[1, 3] = | |
# encoding: utf-8
import urllib
import hmac
import base64
import hashlib
import requests
import traceback
from copy import copy
from datetime import datetime
from threading import Thread
from Queue import Queue, Empty
from multiprocessing.dummy import Pool
from time import sleep
import json
import zlib
# retrieve package: sudo pip install websocket websocket-client pathlib
from websocket import create_connection, _exceptions
# 常量定义
TIMEOUT = 5
HUOBI_API_HOST = "api.huobi.pro"
HADAX_API_HOST = "api.hadax.com"
LANG = 'zh-CN'
DEFAULT_GET_HEADERS = {
"Content-type": "application/x-www-form-urlencoded",
'Accept': 'application/json',
'Accept-Language': LANG,
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0'
}
DEFAULT_POST_HEADERS = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Accept-Language': LANG,
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0'
}
#----------------------------------------------------------------------
def createSign(params, method, host, path, secretKey):
    """Build the request signature required by the Huobi REST API.

    The payload is ``method\\nhost\\npath\\nsorted-urlencoded-params``, signed
    with HMAC-SHA256 and base64-encoded (SignatureVersion 2 scheme).

    :param params: dict of query parameters (signature params included)
    :param method: HTTP verb, e.g. ``'GET'`` or ``'POST'``
    :param host: API hostname, e.g. ``'api.huobi.pro'``
    :param path: request path, e.g. ``'/v1/order/orders'``
    :param secretKey: API secret used as the HMAC key
    :return: base64-encoded signature string
    """
    # Local import keeps this function working on both Python 3
    # (urllib.parse.urlencode) and Python 2 (urllib.urlencode); the original
    # module-level ``urllib.urlencode`` attribute only exists on Python 2.
    try:
        from urllib.parse import urlencode
    except ImportError:  # Python 2 fallback
        from urllib import urlencode

    # Parameters must be sorted by key before encoding, per the API spec.
    sortedParams = sorted(params.items(), key=lambda d: d[0], reverse=False)
    encodeParams = urlencode(sortedParams)

    payload = [method, host, path, encodeParams]
    payload = '\n'.join(payload)
    payload = payload.encode(encoding='UTF8')

    secretKey = secretKey.encode(encoding='UTF8')

    digest = hmac.new(secretKey, payload, digestmod=hashlib.sha256).digest()

    signature = base64.b64encode(digest)
    signature = signature.decode()
    return signature
########################################################################
class TradeApi(object):
    """Huobi / HADAX REST trading API client.

    ``addReq`` either performs the HTTP request inline (``SYNC_MODE``) or
    queues it for a worker thread pool (``ASYNC_MODE``, the default); async
    results are delivered through the matching ``on*`` callback together with
    the request id returned by the ``get*`` / ``placeOrder`` / ... methods.
    """

    HUOBI = 'huobi'
    HADAX = 'hadax'

    SYNC_MODE = 'sync'
    ASYNC_MODE = 'async'

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        self.accessKey = ''
        self.secretKey = ''

        self.mode = self.ASYNC_MODE
        self.active = False     # worker-loop flag
        self.reqid = 0          # monotonically increasing request id
        self.queue = Queue()    # pending async requests
        self.pool = None        # worker thread pool (async mode only)

    #----------------------------------------------------------------------
    def init(self, host, accessKey, secretKey, mode=None):
        """Configure exchange host, credentials and optional working mode."""
        if host == self.HUOBI:
            self.hostname = HUOBI_API_HOST
        else:
            self.hostname = HADAX_API_HOST
        self.hosturl = 'https://%s' %self.hostname

        self.accessKey = accessKey
        self.secretKey = secretKey

        if mode:
            self.mode = mode

        self.proxies = {
            # "http" : "http://localhost:8118/",
            # "https" : "http://localhost:8118/"
        }
        return True

    #----------------------------------------------------------------------
    def start(self, n=10):
        """Start the API; in async mode spin up ``n`` worker threads."""
        self.active = True

        if self.mode == self.ASYNC_MODE:
            self.pool = Pool(n)
            self.pool.map_async(self.run, range(n))

    #----------------------------------------------------------------------
    def close(self):
        """Stop the workers and wait for them to exit."""
        self.active = False

        self.pool.close()
        self.pool.join()

    #----------------------------------------------------------------------
    def httpGet(self, url, params):
        """HTTP GET; returns ``(success, json-or-error-message)``."""
        headers = copy(DEFAULT_GET_HEADERS)
        postdata = urllib.urlencode(params)

        try:
            response = requests.get(url, postdata, headers=headers, proxies=self.proxies, timeout=TIMEOUT)
            if response.status_code == 200:
                return True, response.json()
            else:
                return False, u'GET请求失败,状态代码:%s' %response.status_code
        except Exception as e:
            return False, u'GET请求触发异常,原因:%s' %e

    #----------------------------------------------------------------------
    def httpPost(self, url, params, add_to_headers=None):
        """HTTP POST with a JSON body; returns ``(success, json-or-error)``."""
        headers = copy(DEFAULT_POST_HEADERS)
        postdata = json.dumps(params)

        try:
            response = requests.post(url, postdata, headers=headers, proxies=self.proxies, timeout=TIMEOUT)
            if response.status_code == 200:
                return True, response.json()
            else:
                return False, u'POST请求失败,返回信息:%s' %response.json()
        except Exception as e:
            return False, u'POST请求触发异常,原因:%s' %e

    #----------------------------------------------------------------------
    def generateSignParams(self):
        """Build the common signature parameters (key, method, timestamp)."""
        timestamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')

        d = {
            'AccessKeyId': self.accessKey,
            'SignatureMethod': 'HmacSHA256',
            'SignatureVersion': '2',
            'Timestamp': timestamp
        }

        return d

    #----------------------------------------------------------------------
    def apiGet(self, path, params):
        """Signed API GET request."""
        method = 'GET'

        params.update(self.generateSignParams())
        params['Signature'] = createSign(params, method, self.hostname, path, self.secretKey)

        url = self.hosturl + path

        return self.httpGet(url, params)

    #----------------------------------------------------------------------
    def apiPost(self, path, params):
        """Signed API POST request; the signature goes in the query string."""
        method = 'POST'

        signParams = self.generateSignParams()
        signParams['Signature'] = createSign(signParams, method, self.hostname, path, self.secretKey)

        url = self.hosturl + path + '?' + urllib.urlencode(signParams)

        return self.httpPost(url, params)

    #----------------------------------------------------------------------
    def addReq(self, path, params, func, callback):
        """Submit a request.

        Async mode: enqueue it and return the request id.
        Sync mode: execute immediately and return ``func``'s result.
        """
        # Async mode
        if self.mode == self.ASYNC_MODE:
            self.reqid += 1
            req = (path, params, func, callback, self.reqid)
            self.queue.put(req)
            return self.reqid
        # Sync mode
        else:
            return func(path, params)

    #----------------------------------------------------------------------
    def processReq(self, req):
        """Process one queued request and dispatch its callback."""
        path, params, func, callback, reqid = req

        result, data = func(path, params)

        if result:
            if data['status'] == 'ok':
                # Callbacks receive the unwrapped 'data' payload only.
                callback(data['data'], reqid)
            else:
                msg = u'错误代码:%s,错误信息:%s' %(data['err-code'], data['err-msg'])
                self.onError(msg, reqid)
        else:
            self.onError(data, reqid)

            # Failed requests are re-queued for retry on a later pass.
            # NOTE(review): there is no retry limit, so a permanently failing
            # request will loop forever.
            self.queue.put(req)

    #----------------------------------------------------------------------
    def run(self, n):
        """Worker loop: keep pulling requests while the API is active."""
        while self.active:
            try:
                req = self.queue.get(timeout=1)
                self.processReq(req)
            except Empty:
                pass

    #----------------------------------------------------------------------
    def getSymbols(self):
        """Query the supported trading symbols."""
        if self.hostname == HUOBI_API_HOST:
            path = '/v1/common/symbols'
        else:
            path = '/v1/hadax/common/symbols'

        params = {}
        func = self.apiGet
        callback = self.onGetSymbols

        return self.addReq(path, params, func, callback)

    #----------------------------------------------------------------------
    def getCurrencys(self):
        """Query the supported currencies."""
        if self.hostname == HUOBI_API_HOST:
            path = '/v1/common/currencys'
        else:
            path = '/v1/hadax/common/currencys'

        params = {}
        func = self.apiGet
        callback = self.onGetCurrencys

        return self.addReq(path, params, func, callback)

    #----------------------------------------------------------------------
    def getTimestamp(self):
        """Query the exchange server time."""
        path = '/v1/common/timestamp'
        params = {}
        func = self.apiGet
        callback = self.onGetTimestamp

        return self.addReq(path, params, func, callback)

    #----------------------------------------------------------------------
    def getAccounts(self):
        """Query the accounts of the current user."""
        path = '/v1/account/accounts'
        params = {}
        func = self.apiGet
        callback = self.onGetAccounts

        return self.addReq(path, params, func, callback)

    #----------------------------------------------------------------------
    def getAccountBalance(self, accountid):
        """Query the balance of one account."""
        if self.hostname == HUOBI_API_HOST:
            path = '/v1/account/accounts/%s/balance' %accountid
        else:
            path = '/v1/hadax/account/accounts/%s/balance' %accountid

        params = {}
        func = self.apiGet
        callback = self.onGetAccountBalance

        return self.addReq(path, params, func, callback)

    #----------------------------------------------------------------------
    def getOrders(self, symbol, states, types=None, startDate=None,
                  endDate=None, from_=None, direct=None, size=None):
        """Query historical orders matching the given filters."""
        path = '/v1/order/orders'

        params = {
            'symbol': symbol,
            'states': states
        }

        if types:
            params['types'] = types
        if startDate:
            params['start-date'] = startDate
        if endDate:
            params['end-date'] = endDate
        if from_:
            params['from'] = from_
        if direct:
            params['direct'] = direct
        if size:
            params['size'] = size

        func = self.apiGet
        callback = self.onGetOrders

        return self.addReq(path, params, func, callback)

    #----------------------------------------------------------------------
    def getOpenOrders(self, accountId=None, symbol=None, side=None, size=None):
        """Query open (unfilled) orders of the current user.

        ``accountId`` and ``symbol`` must be given together or both omitted;
        when both are omitted the API returns up to 500 open orders, sorted
        by order id descending.
        """
        path = '/v1/order/openOrders'

        params = {}

        # The API requires account_id and symbol to be paired.
        if symbol:
            params['symbol'] = symbol
            params['account_id'] = accountId
        if side:
            params['side'] = side
        if size:
            params['size'] = size

        func = self.apiGet
        callback = self.onGetOrders

        return self.addReq(path, params, func, callback)

    #----------------------------------------------------------------------
    def getMatchResults(self, symbol, types=None, startDate=None,
                        endDate=None, from_=None, direct=None, size=None):
        """Query trade (match) results matching the given filters."""
        path = '/v1/order/matchresults'

        params = {
            'symbol': symbol
        }

        if types:
            params['types'] = types
        if startDate:
            params['start-date'] = startDate
        if endDate:
            params['end-date'] = endDate
        if from_:
            params['from'] = from_
        if direct:
            params['direct'] = direct
        if size:
            params['size'] = size

        func = self.apiGet
        callback = self.onGetMatchResults

        return self.addReq(path, params, func, callback)

    #----------------------------------------------------------------------
    def getOrder(self, orderid):
        """Query a single order by id."""
        path = '/v1/order/orders/%s' %orderid

        params = {}
        func = self.apiGet
        callback = self.onGetOrder

        return self.addReq(path, params, func, callback)

    #----------------------------------------------------------------------
    def getMatchResult(self, orderid):
        """Query the match results of a single order."""
        path = '/v1/order/orders/%s/matchresults' %orderid

        params = {}
        func = self.apiGet
        callback = self.onGetMatchResult

        return self.addReq(path, params, func, callback)

    #----------------------------------------------------------------------
    def placeOrder(self, accountid, amount, symbol, type_, price=None, source=None):
        """Place a new order."""
        if self.hostname == HUOBI_API_HOST:
            path = '/v1/order/orders/place'
        else:
            path = '/v1/hadax/order/orders/place'

        params = {
            'account-id': accountid,
            'amount': amount,
            'symbol': symbol,
            'type': type_
        }

        if price:
            params['price'] = price
        if source:
            params['source'] = source

        func = self.apiPost
        callback = self.onPlaceOrder

        return self.addReq(path, params, func, callback)

    #----------------------------------------------------------------------
    def cancelOrder(self, orderid):
        """Cancel one order."""
        path = '/v1/order/orders/%s/submitcancel' %orderid

        params = {}
        func = self.apiPost
        callback = self.onCancelOrder

        return self.addReq(path, params, func, callback)

    #----------------------------------------------------------------------
    def batchCancel(self, orderids):
        """Cancel a batch of orders."""
        path = '/v1/order/orders/batchcancel'

        params = {
            'order-ids': orderids
        }

        func = self.apiPost
        callback = self.onBatchCancel

        return self.addReq(path, params, func, callback)

    #----------------------------------------------------------------------
    def onError(self, msg, reqid):
        """Error callback."""
        print(msg, reqid)

    #----------------------------------------------------------------------
    def onGetSymbols(self, data, reqid):
        """Symbols query callback."""
        for d in data:
            print(d)

    #----------------------------------------------------------------------
    def onGetCurrencys(self, data, reqid):
        """Currencies query callback."""
        print(reqid, data)

    #----------------------------------------------------------------------
    def onGetTimestamp(self, data, reqid):
        """Server time query callback."""
        print(reqid, data)

    #----------------------------------------------------------------------
    def onGetAccounts(self, data, reqid):
        """Accounts query callback."""
        print(reqid, data)

    #----------------------------------------------------------------------
    def onGetAccountBalance(self, data, reqid):
        """Account balance query callback."""
        print(reqid, data)
        # FIX: processReq already unwraps the response and passes the payload
        # response['data'] here, so the balance entries live at data['list'];
        # the previous data['data']['list'] raised KeyError.
        for d in data['list']:
            print(d)

    #----------------------------------------------------------------------
    def onGetOrders(self, data, reqid):
        """Orders query callback."""
        print(reqid, data)

    #----------------------------------------------------------------------
    def onGetMatchResults(self, data, reqid):
        """Match results query callback."""
        print(reqid, data)

    #----------------------------------------------------------------------
    def onGetOrder(self, data, reqid):
        """Single order query callback."""
        print(reqid, data)

    #----------------------------------------------------------------------
    def onGetMatchResult(self, data, reqid):
        """Single order match result callback."""
        print(reqid, data)

    #----------------------------------------------------------------------
    def onPlaceOrder(self, data, reqid):
        """Order placement callback."""
        print(reqid, data)

    #----------------------------------------------------------------------
    def onCancelOrder(self, data, reqid):
        """Order cancellation callback."""
        print(reqid, data)

    #----------------------------------------------------------------------
    def onBatchCancel(self, data, reqid):
        """Batch cancellation callback."""
        print(reqid, data)
########################################################################
class DataApi(object):
"""行情接口
https://github.com/huobiapi/API_Docs/wiki/WS_request
"""
HUOBI = 'huobi'
HADAX = 'hadax'
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.ws = None
self.url = ''
self.reqid = 0
self.active = False
self.thread = Thread(target=self.run)
self.subDict = {}
self.url = ''
self.proxies = {}
self.proxyHost =None
#----------------------------------------------------------------------
def init(self, exchHost, proxyHost=None, proxyPort=0):
"""初始化"""
if exchHost == self.HUOBI:
hostname = HUOBI_API_HOST
else:
hostname = HADAX_API_HOST
self.url = 'wss://%s/ws' % hostname
if proxyHost :
self.proxyHost = proxyHost
self.proxyPort = | |
# python3
# Copyright 2021 InstaDeep Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MADQN trainer implementation."""
import copy
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import reverb
import sonnet as snt
import tensorflow as tf
import tree
import trfl
from acme.tf import utils as tf2_utils
from acme.utils import loggers
import mava
from mava import types as mava_types
from mava.systems.tf.variable_utils import VariableClient
from mava.utils import training_utils as train_utils
from mava.utils.sort_utils import sort_str_num
train_utils.set_growing_gpu_memory()
class MADQNTrainer(mava.Trainer):
"""MADQN trainer.
This is the trainer component of a MADQN system. IE it takes a dataset as input
and implements update functionality to learn from this dataset.
"""
    def __init__(
        self,
        agents: List[str],
        agent_types: List[str],
        value_networks: Dict[str, snt.Module],
        target_value_networks: Dict[str, snt.Module],
        optimizer: Union[snt.Optimizer, Dict[str, snt.Optimizer]],
        discount: float,
        target_averaging: bool,
        target_update_period: int,
        target_update_rate: float,
        dataset: tf.data.Dataset,
        observation_networks: Dict[str, snt.Module],
        target_observation_networks: Dict[str, snt.Module],
        variable_client: VariableClient,
        counts: Dict[str, Any],
        agent_net_keys: Dict[str, str],
        max_gradient_norm: float = None,
        logger: loggers.Logger = None,
        learning_rate_scheduler_fn: Optional[Dict[str, Callable[[int], None]]] = None,
    ):
        """Initialise MADQN trainer.

        Args:
            agents: agent ids, e.g. "agent_0".
            agent_types: agent types, e.g. "speaker" or "listener".
            value_networks: value networks for each agent in
                the system.
            target_value_networks: target value networks.
            optimizer: optimizer(s) for updating policy networks. Either a
                single optimizer (deep-copied per network key) or a dict
                keyed by network key.
            discount: discount factor for TD updates.
            target_averaging: whether to use polyak averaging for target network
                updates.
            target_update_period: number of steps before target networks are
                updated.
            target_update_rate: update rate when using averaging.
            dataset: training dataset.
            observation_networks: network for feature
                extraction from raw observation.
            target_observation_networks: target observation
                network.
            variable_client: The client used to manage the variables.
            counts: step counter object.
            agent_net_keys: specifies what network each agent uses.
            max_gradient_norm: maximum allowed norm for gradients
                before clipping is applied.
            logger: logger object for logging trainer
                statistics.
            learning_rate_scheduler_fn: dict with two functions (one for the policy and
                one for the critic optimizer), that takes in a trainer step t and
                returns the current learning rate.
        """
        self._agents = agents
        self._agent_types = agent_types
        self._agent_net_keys = agent_net_keys
        self._variable_client = variable_client
        self._learning_rate_scheduler_fn = learning_rate_scheduler_fn

        # Setup counts
        self._counts = counts

        # Store online and target networks.
        self._value_networks = value_networks
        self._target_value_networks = target_value_networks

        # Ensure obs and target networks are sonnet modules
        self._observation_networks = {
            k: tf2_utils.to_sonnet_module(v) for k, v in observation_networks.items()
        }
        self._target_observation_networks = {
            k: tf2_utils.to_sonnet_module(v)
            for k, v in target_observation_networks.items()
        }

        # General learner book-keeping and loggers.
        self._logger = logger or loggers.make_default_logger("trainer")

        # Other learner parameters.
        self._discount = discount

        # Set up gradient clipping; if no max norm was given, use a very
        # large finite bound (infinity results in NaNs in clip_by_global_norm).
        if max_gradient_norm is not None:
            self._max_gradient_norm = tf.convert_to_tensor(max_gradient_norm)
        else:  # A very large number. Infinity results in NaNs.
            self._max_gradient_norm = tf.convert_to_tensor(1e10)

        # Necessary to track when to update target networks.
        self._num_steps = tf.Variable(0, dtype=tf.int32)
        self._target_averaging = target_averaging
        self._target_update_period = target_update_period
        self._target_update_rate = target_update_rate

        # Create an iterator to go through the dataset.
        self._iterator = iter(dataset)  # pytype: disable=wrong-arg-types

        # Dictionary with unique network keys.
        self.unique_net_keys = sort_str_num(self._value_networks.keys())

        # Get the agents which should be updated and run
        self._trainer_agent_list = self._agents

        # Create optimizers for different agent types.
        # A single optimizer instance is deep-copied per network key so each
        # network gets independent optimizer state (slot variables).
        if not isinstance(optimizer, dict):
            self._optimizers: Dict[str, snt.Optimizer] = {}
            for agent in self.unique_net_keys:
                self._optimizers[agent] = copy.deepcopy(optimizer)
        else:
            self._optimizers = optimizer

        # Expose the variables to the variable server/client machinery.
        self._system_network_variables: Dict[str, Dict[str, snt.Module]] = {
            "observations": {},
            "values": {},
        }
        for agent_key in self.unique_net_keys:
            self._system_network_variables["observations"][
                agent_key
            ] = self._target_observation_networks[agent_key].variables
            self._system_network_variables["values"][agent_key] = self._value_networks[
                agent_key
            ].variables

        # Do not record timestamps until after the first learning step is done.
        # This is to avoid including the time it takes for actors to come online and
        # fill the replay buffer.
        self._timestamp: Optional[float] = None
def _update_target_networks(self) -> None:
"""Update the target networks.
Using either target averaging or
by directy copying the weights of the online networks every few steps.
"""
for key in self.unique_net_keys:
# Update target network.
online_variables = (
*self._observation_networks[key].variables,
*self._value_networks[key].variables,
)
target_variables = (
*self._target_observation_networks[key].variables,
*self._target_value_networks[key].variables,
)
if self._target_averaging:
assert 0.0 < self._target_update_rate < 1.0
tau = self._target_update_rate
for src, dest in zip(online_variables, target_variables):
dest.assign(dest * (1.0 - tau) + src * tau)
else:
# Make online -> target network update ops.
if tf.math.mod(self._num_steps, self._target_update_period) == 0:
for src, dest in zip(online_variables, target_variables):
dest.assign(src)
self._num_steps.assign_add(1)
    def get_variables(self, names: Sequence[str]) -> Dict[str, Dict[str, np.ndarray]]:
        """Deprecated: variables are served through the variable client instead."""
        pass
    def _transform_observations(
        self, obs: Dict[str, mava_types.OLT], next_obs: Dict[str, mava_types.OLT]
    ) -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray]]:
        """Transform the observations using the observation networks of each agent.

        We assume the observation network is non-recurrent.

        Args:
            obs: observations at timestep t-1
            next_obs: observations at timestep t

        Returns:
            Transformed observations (one dict per timestep, keyed by agent id)
        """
        o_tm1 = {}
        o_t = {}
        for agent in self._agents:
            agent_key = self._agent_net_keys[agent]
            # Online observation network embeds the previous-step observation;
            # the target observation network embeds the next-step observation.
            o_tm1[agent] = self._observation_networks[agent_key](obs[agent].observation)
            o_t[agent] = self._target_observation_networks[agent_key](
                next_obs[agent].observation
            )
            # This stop_gradient prevents gradients to propagate into the target
            # observation network. In addition, since the online policy network is
            # evaluated at o_t, this also means the policy loss does not influence
            # the observation network training.
            o_t[agent] = tree.map_structure(tf.stop_gradient, o_t[agent])

        return o_tm1, o_t
    @tf.function
    def _step(
        self,
    ) -> Dict[str, Dict[str, Any]]:
        """Trainer step: one sampled batch -> forward, backward, target update.

        Returns:
            per-agent value losses (as produced by
            ``train_utils.map_losses_per_agent_value``)
        """

        # Draw a batch of data from replay.
        sample: reverb.ReplaySample = next(self._iterator)

        # Compute loss (stores losses and the gradient tape on self).
        self._forward(sample)

        # Compute and apply gradients
        self._backward()

        # Update the target networks
        self._update_target_networks()

        # Log losses per agent
        return train_utils.map_losses_per_agent_value(self.value_losses)
    def _forward(self, inputs: reverb.ReplaySample) -> None:
        """Trainer forward pass.

        Computes the double-Q-learning loss per agent and stores both the
        losses (``self.value_losses``) and the persistent gradient tape
        (``self.tape``) for consumption by ``_backward``.

        Args:
            inputs: input data from the data table (transitions)
        """
        # Unpack input data as follows:
        # o_tm1 = dictionary of observations one for each agent
        # a_tm1 = dictionary of actions taken from obs in o_tm1
        # e_tm1 [Optional] = extra data for timestep t-1
        # that the agents persist in replay.
        # r_t = dictionary of rewards or rewards sequences
        #   (if using N step transitions) ensuing from actions a_tm1
        # d_t = environment discount ensuing from actions a_tm1.
        #   This discount is applied to future rewards after r_t.
        # o_t = dictionary of next observations or next observation sequences
        # e_t [Optional] = extra data for timestep t that the agents persist in replay.
        trans = mava_types.Transition(*inputs.data)

        o_tm1, o_t, a_tm1, r_t, d_t, _, _ = (
            trans.observations,
            trans.next_observations,
            trans.actions,
            trans.rewards,
            trans.discounts,
            trans.extras,
            trans.next_extras,
        )

        self.value_losses = {}

        # Do forward passes through the networks and calculate the losses
        with tf.GradientTape(persistent=True) as tape:
            o_tm1_trans, o_t_trans = self._transform_observations(o_tm1, o_t)

            for agent in self._trainer_agent_list:
                agent_key = self._agent_net_keys[agent]

                # Double Q-learning: the online network selects the next
                # action, the target network evaluates it.
                q_tm1 = self._value_networks[agent_key](o_tm1_trans[agent])
                q_t_value = self._target_value_networks[agent_key](o_t_trans[agent])
                q_t_selector = self._value_networks[agent_key](o_t_trans[agent])

                # Legal action masking: illegal actions get a very negative
                # value so the selector argmax can never pick them.
                q_t_selector = tf.where(
                    tf.cast(o_t[agent].legal_actions, "bool"), q_t_selector, -999999999
                )

                # pcont: cast the scalar discount to the replay discount dtype.
                discount = tf.cast(self._discount, dtype=d_t[agent].dtype)

                # Value loss.
                value_loss, _ = trfl.double_qlearning(
                    q_tm1,
                    a_tm1[agent],
                    r_t[agent],
                    discount * d_t[agent],
                    q_t_value,
                    q_t_selector,
                )

                # Mean over the batch dimension.
                self.value_losses[agent] = tf.reduce_mean(value_loss, axis=0)

        self.tape = tape
    def _backward(self) -> None:
        """Trainer backward pass updating network parameters"""
        # Calculate the gradients and update the networks, one agent at a
        # time, reusing the persistent tape produced by _forward.
        value_losses = self.value_losses
        tape = self.tape
        for agent in self._trainer_agent_list:
            agent_key = self._agent_net_keys[agent]
            # Get trainable variables (observation network + value network).
            variables = (
                self._observation_networks[agent_key].trainable_variables
                + self._value_networks[agent_key].trainable_variables
            )
            # Compute gradients.
            # Note: Warning "WARNING:tensorflow:Calling GradientTape.gradient
            # on a persistent tape inside its context is significantly less efficient
            # than calling it outside the context." caused by losses.dpg, which calls
            # tape.gradient.
            gradients = tape.gradient(value_losses[agent], variables)
            # Maybe clip gradients (clip_by_global_norm returns (list, norm)).
            gradients = tf.clip_by_global_norm(gradients, self._max_gradient_norm)[0]
            # Apply gradients.
            self._optimizers[agent_key].apply(gradients, variables)
        # Delete the persistent tape manually once all agents are updated.
        train_utils.safe_del(self, "tape")
def step(self) -> None:
"""Trainer step to update the parameters of the agents in the system"""
raise NotImplementedError("A trainer statistics wrapper should overwrite this.")
def after_trainer_step(self) -> None:
"""Optionally decay lr after every training step."""
if self._learning_rate_scheduler_fn:
self._decay_lr(self._num_steps)
info: Dict[str, Dict[str, float]] = {}
for agent in self._agents:
info[agent] = {}
info[agent]["learning_rate"] = self._optimizers[
self._agent_net_keys[agent]
].learning_rate
if self._logger:
self._logger.write(info)
def | |
<filename>unused/cv/support/ElectopiaLearning.py<gh_stars>0
# All rights reserved by forest fairy.
# You cannot modify or share anything without sacrifice.
# If you don't agree, keep calm and don't look at the code below!
__author__ = "VirtualV <https://github.com/virtualvfix>"
__date__ = "$Apr 28, 2016 12:18:18 PM$"
import os
import sys
import cv2
import numpy as np
import cv2.cv as cv
from time import time, sleep
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__)) + os.sep  # directory of this script, with trailing separator
IMAGE_PATH = 'D:\\screenshots' + os.sep  # directory the screenshots are read from
IMAGE_NAME = 'electopia'  # substring used to select screenshot files (and to name data files)
IMAGE_CHANGE_TIME = 0.1  # seconds before auto-advancing to the next image
IMAGE_SCALE = 0.3  # frames are resized by 1/IMAGE_SCALE (values < 1 enlarge the frame)
WIDTH,HEIGHT=1,0  # indices into a numpy image shape: shape[0] is height, shape[1] is width
X,Y=0,1  # indices into (x, y) coordinate tuples
SAMPLE_SIZE = 32  # side length (pixels) of the square samples fed to the kNN model
class CVBasemarkX(object):
    """Interactive tool for building a kNN digit-recognition training set.

    Use the left mouse button to select an area on the image.
    This area, with relative coordinates, is printed on the image in the "SELECT" tag.
    Use the 0-9 keys (not numpad) to add a response for the currently selected
    area (the area is saved as a sample).
    Responses and samples are saved to the script directory after exit via the Esc key.
    Keys:
    ESC - exit.
    SpaceBar - skip image.
    0-9 - digit on the selected area (response for recognition).
    Backspace - delete the previous response.
    Enter - skip the selected sample. """
    def __init__(self):
        # Collect all files under IMAGE_PATH whose name contains IMAGE_NAME.
        self.image_list = []
        for dirs,subs,files in os.walk(IMAGE_PATH):
            for file in files:
                if IMAGE_NAME.lower() in file.lower():
                    self.image_list.append(file)
        # (label, value) pairs rendered as an on-screen overlay by drawText().
        self.textarray = [('image', None),
                          ('image count', None),
                          ('select', None)]
        # Relative (x, y) corners of the region of interest within the frame.
        self.digit_area = [(0.277, 0.267), (0.659, 0.738)]
        # Optional pre-trained kNN model, loaded when saved data is present.
        # NOTE(review): only the samples file is checked; loading assumes the
        # matching responses file exists alongside it -- confirm.
        self.digit_model = None
        if os.path.exists(CURRENT_DIR + IMAGE_NAME.lower() + 'knsamples.data'):
            self.digit_model = cv2.KNearest()
            samples = np.loadtxt(CURRENT_DIR + IMAGE_NAME.lower() + 'knsamples.data',np.float32)
            responses = np.loadtxt(CURRENT_DIR + IMAGE_NAME.lower() + 'knresponses.data',np.float32)
            self.digit_model.train(samples, responses)
    def editText(self, name, text):
        """ Update the overlay entry called `name` with `text`, or append a new entry. """
        for i in range(0, len(self.textarray), 1):
            if name in self.textarray[i]:
                self.textarray[i] = (self.textarray[i][0], text)
                return
        self.textarray.append((name, text))
    def drawText(self, image, fontScale = 0.5, color = (255,255,255)):
        """ Draw every overlay entry onto `image`, one text line per entry. """
        i = 15
        for x in self.textarray:
            cv2.putText(image, '{0}: {1}'.format(x[0].upper(), x[1]), (10, i), fontFace = cv2.FONT_HERSHEY_DUPLEX, fontScale = fontScale, color=color)
            i += 15
    def relative(self, x,y):
        # mouse relative coordinate: pixel position as fractions of frame size
        accuracy = 3
        return (round(float(x)/self.width, accuracy), round(float(y)/self.height, accuracy))
    def getSample(self, threshold, sample_area, current_selection):
        """ get image sample, resize it and convert it to full binary image """
        # Crop the selected (x, y, w, h) bounding box from the threshold image.
        sample = threshold[sample_area[current_selection][Y]:sample_area[current_selection][Y]+sample_area[current_selection][3],\
                           sample_area[current_selection][X]:sample_area[current_selection][X]+sample_area[current_selection][2]]
        sample = cv2.resize(sample, (SAMPLE_SIZE, SAMPLE_SIZE))
        # convert image to true binary (pixel values become only 0 or 255)
        for i in xrange(len(sample)):
            for j in xrange(len(sample[i])):
                if sample[i][j] < 255/2:
                    sample[i][j] = 0
                else: sample[i][j] = 255
        return sample
    def recognition(self, samples, nearest=1):
        """ Recognize digits from samples. Returns digits as a string, or -1 when there is no model or no result. """
        if self.digit_model is None: return -1
        string = ''
        for i in xrange(len(samples)):
            # Flatten the 2-D sample into the 1 x N float32 row kNN expects.
            sample = samples[i].reshape((1,samples[i].size))
            sample = np.float32(sample)
            retval, results, neigh_resp, dists = self.digit_model.find_nearest(sample, k=nearest)
            d = int(results[0][0])
            # 46 is the ASCII code of '.', stored as a special response.
            if d == 46: d = '.'
            string += str(d)
        return string if string != '' else -1
    def main(self):
        """ Run the interactive labelling loop until Esc, then persist collected samples/responses. """
        # samples = np.empty((0, SAMPLE_SIZE*SAMPLE_SIZE)) # 32x32 samples size
        samples = []
        responses = []
        try:
            self.drawing = False # true if mouse is pressed
            self.mouse_rectangle = None
            self.width, self.height = 0, 0
            # mouse callback function for drawing the selection rectangle
            def draw_rectangle(event, x, y, flags, param):
                if event == cv2.EVENT_LBUTTONDOWN:
                    self.drawing = True
                    self.mouse_rectangle = [self.relative(x,y), self.relative(x,y)]
                elif event == cv2.EVENT_MOUSEMOVE:
                    if self.drawing == True:
                        self.mouse_rectangle[1] = self.relative(x,y)
                elif event == cv2.EVENT_LBUTTONUP:
                    self.drawing = False
                    self.mouse_rectangle[1] = self.relative(x,y)
                    self.editText('select', self.mouse_rectangle)
                    print self.mouse_rectangle
            # create windows
            cv2.namedWindow(IMAGE_NAME)
            cv2.namedWindow(IMAGE_NAME + '2')
            cv2.namedWindow('SAMPLE')
            cv2.setMouseCallback(IMAGE_NAME, draw_rectangle)
            current_img = -1
            current_time = time()
            just_start = True
            current_selection = -1 # selected area
            digit = -1 # selected digit
            while True:
                # load the next image when the timer expires (or at startup),
                # but only after the previous image's samples are done (-1)
                if (time()-current_time >= IMAGE_CHANGE_TIME or just_start) and current_selection == -1:
                    current_selection = 0
                    just_start = False
                    # load image
                    frame = cv2.imread(IMAGE_PATH + self.image_list[current_img%len(self.image_list)])
                    print frame.shape[HEIGHT], frame.shape[WIDTH], self.image_list[current_img%len(self.image_list)]
                    # resize (IMAGE_SCALE < 1, so dividing enlarges the frame)
                    frame = cv2.resize(frame, (int(frame.shape[WIDTH]/IMAGE_SCALE), int(frame.shape[HEIGHT]/IMAGE_SCALE)))
                    # crop image to the fixed region of interest
                    self.digit_area = [(0.277, 0.267), (0.659, 0.738)]
                    frame = frame[int(frame.shape[HEIGHT]*self.digit_area[0][Y]):int(frame.shape[HEIGHT]*self.digit_area[1][Y]),
                                  int(frame.shape[WIDTH]*self.digit_area[0][X]):int(frame.shape[WIDTH]*self.digit_area[1][X])]
                    # source_frame = np.copy(frame)
                    # find score field
                    image = np.copy(frame)
                    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                    # threshold = cv2.threshold(image2, 90, 256, cv2.THRESH_TOZERO)[1]
                    threshold = cv2.threshold(image, 90, 256, cv2.THRESH_BINARY_INV)[1]
                    # find max contour
                    contours, hierarchy = cv2.findContours(threshold, cv2.RETR_CCOMP , cv2.CHAIN_APPROX_SIMPLE)
                    maxArea = 0
                    maxContour = None
                    for cont in contours:
                        _area = cv2.contourArea(cont)#np.int0(cv2.cv.BoxPoints(cv2.minAreaRect(cont))))
                        if _area > maxArea:
                            maxArea = _area
                            maxContour = cont
                    # get crop area from the largest contour's bounding box
                    x,y,w,h = cv2.boundingRect(maxContour)
                    self.digit_area = [(float(x)/frame.shape[WIDTH], float(y)/frame.shape[HEIGHT]), \
                                       (float(x+w)/frame.shape[WIDTH], float(y+h)/frame.shape[HEIGHT])]
                    # crop again
                    frame = frame[int(frame.shape[HEIGHT]*self.digit_area[0][Y]):int(frame.shape[HEIGHT]*self.digit_area[1][Y]),
                                  int(frame.shape[WIDTH]*self.digit_area[0][X]):int(frame.shape[WIDTH]*self.digit_area[1][X])]
                    # crop again
                    self.digit_area = [(0.7, 0.16), (0.982, 0.684)]#[(0.046, 0.145), (0.963, 0.702)]
                    # crop again
                    frame = frame[int(frame.shape[HEIGHT]*self.digit_area[0][Y]):int(frame.shape[HEIGHT]*self.digit_area[1][Y]),
                                  int(frame.shape[WIDTH]*self.digit_area[0][X]):int(frame.shape[WIDTH]*self.digit_area[1][X])]
                    self.height, self.width = frame.shape[:2]#int(frame.shape[0]/IMAGE_SCALE), int(frame.shape[1]/IMAGE_SCALE)
                    current_img += 1
                    current_time = time()
                    self.editText('image', self.image_list[current_img%len(self.image_list)])
                    self.editText('image count', str(current_img)+'/'+str(len(self.image_list)))
                #===============================================================
                image = np.copy(frame)
                image2 = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                # image = cv2.GaussianBlur(np.copy(image),(5,5),0)
                # crop image
                # threshold = cv2.inRange(cv2.cvtColor(image, cv2.COLOR_RGB2HSV), cv.CV_RGB(150,0,0), cv.CV_RGB(256, 150, 180))
                # binary crop image
                threshold = cv2.threshold(image2, 90, 256, cv2.THRESH_TOZERO)[1]
                # threshold = cv2.threshold(image2, 90, 256, cv2.THRESH_BINARY)[1]
                contours, hierarchy = cv2.findContours(np.copy(threshold), cv2.RETR_CCOMP , cv2.CHAIN_APPROX_SIMPLE)
                hierarchy = hierarchy[0] # get the actual inner list of hierarchy descriptions
                # For each contour, find the bounding rectangle and draw it
                _smpl_area = [] # keeps symbol rects
                for component in zip(contours, hierarchy):
                    currentContour = component[0]
                    currentHierarchy = component[1]
                    x,y,w,h = cv2.boundingRect(currentContour)
                    if currentHierarchy[2] > 0 or currentHierarchy[3] < 0:
                        _smpl_area.append((x,y,w,h))
                        cv2.rectangle(image, (x,y), (x+w,y+h), (0,255,0), 1) # parent contour
                    else:
                        cv2.rectangle(image, (x,y), (x+w,y+h), (0,0,255), 1) # child contour
                _smpl_area = sorted(_smpl_area, key=lambda x:x[1]) # sort by Y
                # group by line and sort by X
                sample_area = []
                sample_area_group = []
                i = 0
                while i < len(_smpl_area):
                    _yh = _smpl_area[i][1]+_smpl_area[i][3] # y+h
                    _delta = _smpl_area[i][3]*0.1 # 10% of y+h
                    tm = [_smpl_area[i]]
                    # gather following rects whose bottom edge lies within _delta
                    for j in range(i+1, len(_smpl_area), 1):
                        _yh_next = _smpl_area[j][1]+_smpl_area[j][3]
                        if _yh+_delta >= _yh_next >= _yh-_delta:
                            tm.append(_smpl_area[j])
                        else: break
                    i += 1 if len(tm) == 0 else len(tm)
                    for x in sorted(tm, key=lambda x:x[0]): sample_area.append(x)
                    sample_area_group.append(sorted(tm, key=lambda x:x[0])) # sort by X
                # get sample of symbol
                if current_selection < len(sample_area):
                    sample = self.getSample(threshold, sample_area, current_selection)
                    # run recognition on each detected line, if a model is available
                    for i in xrange(len(sample_area_group)):
                        recog_smpls = []
                        for j in xrange(len(sample_area_group[i])):
                            recog_smpls.append(self.getSample(threshold, sample_area_group[i], j))
                        if i<5: self.editText('digit'+str(i), self.recognition(recog_smpls))
                    cv2.rectangle(image, (sample_area[current_selection][X],sample_area[current_selection][Y]), \
                                  (sample_area[current_selection][X]+sample_area[current_selection][2],sample_area[current_selection][Y]+sample_area[current_selection][3]), (255,0,255), 2) # draw selected area
                #===============================================================
                # draw mouse rectangle
                if not self.mouse_rectangle is None:
                    cv2.rectangle(image, (int(self.mouse_rectangle[0][X]*self.width),int(self.mouse_rectangle[0][Y]*self.height)), \
                                  (int(self.mouse_rectangle[1][X]*self.width),int(self.mouse_rectangle[1][Y]*self.height)), \
                                  thickness = 1, color = (0,255,255))
                self.editText('prev press', digit)
                self.drawText(image, color = (255,255,0))
                cv2.imshow(IMAGE_NAME, image)
                cv2.imshow(IMAGE_NAME+'2', threshold)
                cv2.imshow('SAMPLE', sample)
                # key event
                key = cv2.waitKey(1)
                if key == 27: break # esc
                elif key == 13: # enter: skip the selected sample
                    current_selection += 1
                    if current_selection >= len(sample_area): current_selection = -1
                elif key == 32: # space: skip the whole image
                    current_selection = -1
                elif key == 8: # backspace # delete previous digit
                    current_selection -= 1
                    responses.pop()
                    samples.pop()
                elif 57 >= key >=48 or key == 46: # digits and point ('.')
                    digit = int(chr(key)) if key != 46 else key
                    current_selection += 1
                    # keep learning sample
                    sample = sample.reshape((1,sample.size))
                    sample = np.float32(sample)
                    # samples = np.append(samples,sample,0)
                    samples.append(sample)
                    # keep learning response
                    responses.append(digit)
                    if current_selection >= len(sample_area): current_selection = -1
                elif key >= 0: print 'KEY PRESSED: ' + str(key) + ' - ' + chr(key)
        finally:
            cv2.destroyAllWindows()
            if len(responses) > 0:
                print "Training complete ^-^"
                # order learning results by responses
                order = sorted(zip(responses, samples), key=lambda x:x[0])
                responses, samples = zip(*order)
                # convert samples to numpy array
                samples_save = np.empty((0, SAMPLE_SIZE*SAMPLE_SIZE), dtype=np.float32) # samples size
                for x in samples: samples_save = np.append(samples_save,x,0)
                # convert responses to numpy array
                responses = np.array(responses, dtype=np.float32)
                responses = responses.reshape((responses.size,1))
                # np.savetxt(CURRENT_DIR + IMAGE_NAME.lower() + 'knsamples.data', samples)
                np.savetxt(CURRENT_DIR + IMAGE_NAME.lower() + 'knsamples.data', samples_save)
                np.savetxt(CURRENT_DIR + IMAGE_NAME.lower() + 'knresponses.data', responses)
if __name__ == "__main__":
print 'Use left mouse key for select area on image. This area with relative coordinates will be printed on image in "SELECT" tag.\nUse 0-9 keys (not numpad) to add responce for current selected area (area will be saved as sample).\nResponces and samples will be saved to script directory after exit via Esc key.\nKeys: \nESC | |
Format defined by RFC3339.
"""
return pulumi.get(self, "time_updated")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Type of Responder
        """
        # Reads the value stored on the underlying Pulumi output dict.
        return pulumi.get(self, "type")
@pulumi.output_type
class GetResponderRecipeEffectiveResponderRuleDetailsResult(dict):
    """Details of an effective responder rule: condition, configurations, enablement and execution mode."""

    def __init__(__self__, *,
                 condition: str,
                 configurations: Sequence['outputs.GetResponderRecipeEffectiveResponderRuleDetailsConfigurationResult'],
                 is_enabled: bool,
                 mode: str):
        """
        :param Sequence['GetResponderRecipeEffectiveResponderRuleDetailsConfigurationArgs'] configurations: ResponderRule configurations
        :param bool is_enabled: Identifies state for ResponderRule
        :param str mode: Execution Mode for ResponderRule
        """
        # Persist every field on the underlying dict via the Pulumi setter.
        for attr_name, attr_value in (
            ("condition", condition),
            ("configurations", configurations),
            ("is_enabled", is_enabled),
            ("mode", mode),
        ):
            pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def condition(self) -> str:
        return pulumi.get(self, "condition")

    @property
    @pulumi.getter
    def configurations(self) -> Sequence['outputs.GetResponderRecipeEffectiveResponderRuleDetailsConfigurationResult']:
        """ResponderRule configurations"""
        return pulumi.get(self, "configurations")

    @property
    @pulumi.getter(name="isEnabled")
    def is_enabled(self) -> bool:
        """Identifies state for ResponderRule"""
        return pulumi.get(self, "is_enabled")

    @property
    @pulumi.getter
    def mode(self) -> str:
        """Execution Mode for ResponderRule"""
        return pulumi.get(self, "mode")
@pulumi.output_type
class GetResponderRecipeEffectiveResponderRuleDetailsConfigurationResult(dict):
    """A single key/name/value configuration entry of an effective responder rule."""

    def __init__(__self__, *,
                 config_key: str,
                 name: str,
                 value: str):
        """
        :param str config_key: Unique name of the configuration
        :param str name: configuration name
        :param str value: configuration value
        """
        # Persist every field on the underlying dict via the Pulumi setter.
        for attr_name, attr_value in (
            ("config_key", config_key),
            ("name", name),
            ("value", value),
        ):
            pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="configKey")
    def config_key(self) -> str:
        """Unique name of the configuration"""
        return pulumi.get(self, "config_key")

    @property
    @pulumi.getter
    def name(self) -> str:
        """configuration name"""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def value(self) -> str:
        """configuration value"""
        return pulumi.get(self, "value")
@pulumi.output_type
class GetResponderRecipeResponderRuleResult(dict):
    """A responder rule belonging to a responder recipe."""

    def __init__(__self__, *,
                 compartment_id: str,
                 description: str,
                 details: 'outputs.GetResponderRecipeResponderRuleDetailsResult',
                 display_name: str,
                 lifecycle_details: str,
                 policies: Sequence[str],
                 responder_rule_id: str,
                 state: str,
                 supported_modes: Sequence[str],
                 time_created: str,
                 time_updated: str,
                 type: str):
        """
        :param str compartment_id: Compartment Identifier
        :param str description: ResponderRule Description
        :param 'GetResponderRecipeResponderRuleDetailsArgs' details: Details of ResponderRule.
        :param str display_name: ResponderRule Display Name
        :param str lifecycle_details: A message describing the current state in more detail. For example, can be used to provide actionable information for a resource in Failed state.
        :param Sequence[str] policies: List of Policy
        :param str responder_rule_id: Identifier for ResponderRule.
        :param str state: The current state of the Example.
        :param Sequence[str] supported_modes: Supported Execution Modes
        :param str time_created: The date and time the responder recipe was created. Format defined by RFC3339.
        :param str time_updated: The date and time the responder recipe was updated. Format defined by RFC3339.
        :param str type: Type of Responder
        """
        # Persist every field on the underlying dict via the Pulumi setter,
        # in the same order as the constructor arguments.
        for attr_name, attr_value in (
            ("compartment_id", compartment_id),
            ("description", description),
            ("details", details),
            ("display_name", display_name),
            ("lifecycle_details", lifecycle_details),
            ("policies", policies),
            ("responder_rule_id", responder_rule_id),
            ("state", state),
            ("supported_modes", supported_modes),
            ("time_created", time_created),
            ("time_updated", time_updated),
            ("type", type),
        ):
            pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="compartmentId")
    def compartment_id(self) -> str:
        """Compartment Identifier"""
        return pulumi.get(self, "compartment_id")

    @property
    @pulumi.getter
    def description(self) -> str:
        """ResponderRule Description"""
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def details(self) -> 'outputs.GetResponderRecipeResponderRuleDetailsResult':
        """Details of ResponderRule."""
        return pulumi.get(self, "details")

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> str:
        """ResponderRule Display Name"""
        return pulumi.get(self, "display_name")

    @property
    @pulumi.getter(name="lifecycleDetails")
    def lifecycle_details(self) -> str:
        """A message describing the current state in more detail. For example, can be used to provide actionable information for a resource in Failed state."""
        return pulumi.get(self, "lifecycle_details")

    @property
    @pulumi.getter
    def policies(self) -> Sequence[str]:
        """List of Policy"""
        return pulumi.get(self, "policies")

    @property
    @pulumi.getter(name="responderRuleId")
    def responder_rule_id(self) -> str:
        """Identifier for ResponderRule."""
        return pulumi.get(self, "responder_rule_id")

    @property
    @pulumi.getter
    def state(self) -> str:
        """The current state of the Example."""
        return pulumi.get(self, "state")

    @property
    @pulumi.getter(name="supportedModes")
    def supported_modes(self) -> Sequence[str]:
        """Supported Execution Modes"""
        return pulumi.get(self, "supported_modes")

    @property
    @pulumi.getter(name="timeCreated")
    def time_created(self) -> str:
        """The date and time the responder recipe was created. Format defined by RFC3339."""
        return pulumi.get(self, "time_created")

    @property
    @pulumi.getter(name="timeUpdated")
    def time_updated(self) -> str:
        """The date and time the responder recipe was updated. Format defined by RFC3339."""
        return pulumi.get(self, "time_updated")

    @property
    @pulumi.getter
    def type(self) -> str:
        """Type of Responder"""
        return pulumi.get(self, "type")
@pulumi.output_type
class GetResponderRecipeResponderRuleDetailsResult(dict):
    """Details of a responder rule: condition, configurations, enablement and execution mode."""

    def __init__(__self__, *,
                 condition: str,
                 configurations: Sequence['outputs.GetResponderRecipeResponderRuleDetailsConfigurationResult'],
                 is_enabled: bool,
                 mode: str):
        """
        :param Sequence['GetResponderRecipeResponderRuleDetailsConfigurationArgs'] configurations: ResponderRule configurations
        :param bool is_enabled: Identifies state for ResponderRule
        :param str mode: Execution Mode for ResponderRule
        """
        # Persist every field on the underlying dict via the Pulumi setter.
        for attr_name, attr_value in (
            ("condition", condition),
            ("configurations", configurations),
            ("is_enabled", is_enabled),
            ("mode", mode),
        ):
            pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def condition(self) -> str:
        return pulumi.get(self, "condition")

    @property
    @pulumi.getter
    def configurations(self) -> Sequence['outputs.GetResponderRecipeResponderRuleDetailsConfigurationResult']:
        """ResponderRule configurations"""
        return pulumi.get(self, "configurations")

    @property
    @pulumi.getter(name="isEnabled")
    def is_enabled(self) -> bool:
        """Identifies state for ResponderRule"""
        return pulumi.get(self, "is_enabled")

    @property
    @pulumi.getter
    def mode(self) -> str:
        """Execution Mode for ResponderRule"""
        return pulumi.get(self, "mode")
@pulumi.output_type
class GetResponderRecipeResponderRuleDetailsConfigurationResult(dict):
    """A single key/name/value configuration entry of a responder rule."""

    def __init__(__self__, *,
                 config_key: str,
                 name: str,
                 value: str):
        """
        :param str config_key: Unique name of the configuration
        :param str name: configuration name
        :param str value: configuration value
        """
        # Persist every field on the underlying dict via the Pulumi setter.
        for attr_name, attr_value in (
            ("config_key", config_key),
            ("name", name),
            ("value", value),
        ):
            pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="configKey")
    def config_key(self) -> str:
        """Unique name of the configuration"""
        return pulumi.get(self, "config_key")

    @property
    @pulumi.getter
    def name(self) -> str:
        """configuration name"""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def value(self) -> str:
        """configuration value"""
        return pulumi.get(self, "value")
@pulumi.output_type
class GetResponderRecipesFilterResult(dict):
    """A name/values filter applied to the responder recipes query."""

    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str],
                 regex: Optional[bool] = None):
        """
        :param str name: configuration name
        """
        for attr_name, attr_value in (("name", name), ("values", values)):
            pulumi.set(__self__, attr_name, attr_value)
        # "regex" is optional; only persist it when explicitly provided.
        if regex is not None:
            pulumi.set(__self__, "regex", regex)

    @property
    @pulumi.getter
    def name(self) -> str:
        """configuration name"""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        return pulumi.get(self, "values")

    @property
    @pulumi.getter
    def regex(self) -> Optional[bool]:
        return pulumi.get(self, "regex")
@pulumi.output_type
class GetResponderRecipesResponderRecipeCollectionResult(dict):
    """Wrapper holding the list of responder recipe items returned by the query."""

    def __init__(__self__, *,
                 items: Sequence['outputs.GetResponderRecipesResponderRecipeCollectionItemResult']):
        # Single field: the collection's items.
        pulumi.set(__self__, "items", items)

    @property
    @pulumi.getter
    def items(self) -> Sequence['outputs.GetResponderRecipesResponderRecipeCollectionItemResult']:
        """The responder recipe items contained in this collection."""
        return pulumi.get(self, "items")
@pulumi.output_type
class GetResponderRecipesResponderRecipeCollectionItemResult(dict):
def __init__(__self__, *,
compartment_id: str,
defined_tags: Mapping[str, Any],
description: str,
display_name: str,
effective_responder_rules: Sequence['outputs.GetResponderRecipesResponderRecipeCollectionItemEffectiveResponderRuleResult'],
freeform_tags: Mapping[str, Any],
id: str,
lifecycle_details: str,
owner: str,
responder_rules: Sequence['outputs.GetResponderRecipesResponderRecipeCollectionItemResponderRuleResult'],
source_responder_recipe_id: str,
state: str,
system_tags: Mapping[str, Any],
time_created: str,
time_updated: str):
"""
:param str compartment_id: The ID of the compartment in which to list resources.
:param Mapping[str, Any] defined_tags: Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace.bar-key": "value"}`
:param str description: ResponderRule Description
:param str display_name: A filter to return only resources that match the entire display name given.
:param Sequence['GetResponderRecipesResponderRecipeCollectionItemEffectiveResponderRuleArgs'] effective_responder_rules: List of responder rules associated with the recipe
:param Mapping[str, Any] freeform_tags: Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
:param str id: Identifier for ResponderRecipe.
:param str lifecycle_details: A message describing the current state in more detail. For example, can be used to provide actionable information for a resource in Failed state.
:param str owner: Owner of ResponderRecipe
:param Sequence['GetResponderRecipesResponderRecipeCollectionItemResponderRuleArgs'] responder_rules: List of responder rules associated with the recipe
:param str source_responder_recipe_id: The id of the source responder recipe.
:param str state: The field life cycle state. Only one state can be provided. Default value for state is active. If no value is specified state is active.
:param Mapping[str, Any] system_tags: System tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). System tags can be viewed by users, but can only be created by the system. Example: `{"orcl-cloud.free-tier-retained": "true"}`
:param str time_created: The date and time the responder recipe was created. Format defined by RFC3339.
:param str time_updated: The date and time the responder recipe was updated. Format defined by RFC3339.
"""
pulumi.set(__self__, "compartment_id", compartment_id)
pulumi.set(__self__, "defined_tags", defined_tags)
pulumi.set(__self__, "description", description)
pulumi.set(__self__, "display_name", display_name)
pulumi.set(__self__, "effective_responder_rules", effective_responder_rules)
pulumi.set(__self__, "freeform_tags", freeform_tags)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "lifecycle_details", lifecycle_details)
pulumi.set(__self__, "owner", owner)
pulumi.set(__self__, "responder_rules", responder_rules)
pulumi.set(__self__, "source_responder_recipe_id", source_responder_recipe_id)
pulumi.set(__self__, "state", state)
pulumi.set(__self__, "system_tags", system_tags)
pulumi.set(__self__, "time_created", time_created)
pulumi.set(__self__, "time_updated", time_updated)
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> str:
"""
The ID of the compartment in which to list resources.
"""
return pulumi.get(self, "compartment_id")
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> Mapping[str, Any]:
"""
Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace.bar-key": "value"}`
"""
return pulumi.get(self, "defined_tags")
@property
@pulumi.getter
def description(self) -> str:
"""
ResponderRule Description
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
A filter to return only resources that match the entire display name given.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="effectiveResponderRules")
def effective_responder_rules(self) -> Sequence['outputs.GetResponderRecipesResponderRecipeCollectionItemEffectiveResponderRuleResult']:
"""
| |
<reponame>HarveyYan/RNAonGraph<filename>Model/Joint_MRT.py
import os
import sys
import time
import math
import numpy as np
import tensorflow as tf
import subprocess as sp
from Bio.Align.Applications import ClustalwCommandline
basedir = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]
sys.path.append(basedir)
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from Model import _stats
from lib.rgcn_utils import normalize
import lib.plot, lib.logger, lib.clr
import lib.ops.LSTM, lib.ops.Linear, lib.ops.Conv1D
from lib.tf_ghm_loss import get_ghm_weights
from lib.AMSGrad import AMSGrad
class JMRT:
    def __init__(self, node_dim, embedding_vec, gpu_device, **kwargs):
        """Construct the JMRT model graph and its training/inference ops.

        :param node_dim: dimensionality of each node (nucleotide) embedding
        :param embedding_vec: pretrained embedding matrix of shape
            (vocab_size, node_dim); loaded as a frozen (non-trainable) variable
        :param gpu_device: identifier of the device the caller intends to use
        :param kwargs: optional hyperparameters -- see the defaults below
        """
        self.node_dim = node_dim
        self.embedding_vec = embedding_vec
        self.vocab_size = embedding_vec.shape[0]
        self.gpu_device = gpu_device
        # hyperparams
        self.units = kwargs.get('units', 32)
        self.pool_steps = kwargs.get('pool_steps', 10)
        self.lstm_encoder = kwargs.get('lstm_encoder', True)
        self.dropout_rate = kwargs.get('dropout_rate', 0.2)
        self.learning_rate = kwargs.get('learning_rate', 2e-4)
        self.use_clr = kwargs.get('use_clr', False)
        self.use_momentum = kwargs.get('use_momentum', False)
        self.use_bn = kwargs.get('use_bn', False)
        # mixing_ratio weighs the graph-level vs nucleotide-level loss in _loss()
        self.mixing_ratio = kwargs.get('mixing_ratio', 0.)
        self.use_ghm = kwargs.get('use_ghm', False)
        # Build everything inside a private graph so multiple models can coexist.
        self.g = tf.Graph()
        with self.g.as_default():
            self._placeholders()
            if self.use_momentum:
                self.optimizer = tf.contrib.opt.MomentumWOptimizer(
                    1e-4, self.learning_rate * self.lr_multiplier,
                    0.9, use_nesterov=True
                )
            else:
                self.optimizer = AMSGrad(
                    self.learning_rate * self.lr_multiplier,
                    beta2=0.999
                )
            with tf.variable_scope('Classifier', reuse=tf.AUTO_REUSE):
                self._build_ggnn()
                self._loss()
                self._train()
                self._merge()
            # Run UPDATE_OPS (e.g. batch-norm moving statistics) before the train op.
            with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
                self.train_op = self.optimizer.apply_gradients(self.gv)
            _stats('Joint_MRT', self.gv)
            self.saver = tf.train.Saver(max_to_keep=5)
            self.init = tf.global_variables_initializer()
            self.local_init = tf.local_variables_initializer()
        # Freeze the graph; any later accidental op creation will raise.
        self.g.finalize()
        self._init_session()
    def _placeholders(self):
        """Declare all graph inputs and the (possibly cyclic) LR multiplier."""
        # Node (nucleotide) ids for the whole batch, flattened and concatenated.
        self.node_input_ph = tf.placeholder(tf.int32, shape=[None, ])  # nb_nodes
        # nb_nodes x nb_nodes
        # NOTE(review): per its use in _loss, labels appears to be
        # (batch, max_len) per-nucleotide binary labels -- confirm.
        self.labels = tf.placeholder(tf.int32, shape=[None, None, ])
        # Length all segments are front-padded to (see _build_ggnn).
        self.max_len = tf.placeholder(tf.int32, shape=())
        # Length of each individual sequence in the flattened batch.
        self.segment_length = tf.placeholder(tf.int32, shape=[None, ])
        self.is_training_ph = tf.placeholder(tf.bool, ())
        self.global_step = tf.placeholder(tf.int32, ())
        self.hf_iters_per_epoch = tf.placeholder(tf.int32, ())
        if self.use_clr:
            # Cyclic learning-rate multiplier ('exp_range' mode).
            self.lr_multiplier = lib.clr. \
                cyclic_learning_rate(self.global_step, 0.5, 5.,
                                     self.hf_iters_per_epoch, mode='exp_range')
        else:
            self.lr_multiplier = 1.
    def _build_ggnn(self):
        """Build the forward network.

        Pipeline: frozen embedding lookup -> re-batching of variable-length
        segments (front-padded to max_len) -> two 1-D conv blocks ->
        set2set pooling -> two heads: per-nucleotide logits (nuc_output)
        and per-sequence logits (output).
        """
        # Embedding matrix is initialized from the pretrained vectors and frozen.
        embedding = tf.get_variable('embedding_layer', shape=(self.vocab_size, self.node_dim),
                                    initializer=tf.constant_initializer(self.embedding_vec), trainable=False)
        output = tf.nn.embedding_lookup(embedding, self.node_input_ph)
        self.node_tensor = output
        # while loop to recover batch size from the flattened node tensor
        batch_output = tf.TensorArray(tf.float32, size=tf.shape(self.segment_length)[0], infer_shape=True,
                                      dynamic_size=True)
        mask_offset = tf.TensorArray(tf.int32, size=tf.shape(self.segment_length)[0], infer_shape=True,
                                     dynamic_size=True)
        i = tf.constant(0)
        start_idx = tf.constant(0)
        while_condition = lambda i, _1, _2, _3: tf.less(i, tf.shape(self.segment_length)[0])
        def body(i, start_idx, batch_output, mask_offset):
            # Slice out the i-th sequence from the flattened batch.
            end_idx = start_idx + self.segment_length[i]
            segment = output[start_idx:end_idx]
            # pad segment to max len; padding is placed at the FRONT
            segment = tf.pad(segment, [[self.max_len - self.segment_length[i], 0], [0, 0]])
            batch_output = batch_output.write(i, segment)
            # Remember how many leading positions are padding for masking later.
            mask_offset = mask_offset.write(i, self.max_len - self.segment_length[i])
            return [tf.add(i, 1), end_idx, batch_output, mask_offset]
        _, _, batch_output, mask_offset = tf.while_loop(while_condition, body,
                                                        [i, start_idx, batch_output, mask_offset])
        output = batch_output.stack()
        mask_offset = mask_offset.stack()
        self.mask_offset = mask_offset
        with tf.variable_scope('seq_scan'):
            # paddings will influence the prediction results, even unavoidable if batch norm is used
            # but the influence will be very small, enough to ignore it
            output = lib.ops.Conv1D.conv1d('conv1', self.node_dim, self.units, 10, output, biases=False,
                                           pad_mode='SAME', variables_on_cpu=False)
            output = normalize('bn1', output, self.use_bn, self.is_training_ph)
            output = tf.nn.relu(output)
            output = tf.layers.dropout(output, self.dropout_rate, training=self.is_training_ph)
            output = lib.ops.Conv1D.conv1d('conv2', self.units, self.units, 10, output, biases=False,
                                           pad_mode='SAME', variables_on_cpu=False)
            output = normalize('bn2', output, self.use_bn, self.is_training_ph)
            output = tf.nn.relu(output)
            output = tf.layers.dropout(output, self.dropout_rate, training=self.is_training_ph)
        with tf.variable_scope('set2set_pooling'):
            output = lib.ops.LSTM.set2set_pooling('set2set_pooling', output, self.pool_steps, self.dropout_rate,
                                                  self.is_training_ph, self.lstm_encoder, mask_offset,
                                                  variables_on_cpu=False)
        self.nuc_embedding = tf.get_collection('nuc_emb')[0]  # will depend on if bilstm encoder is used or not
        # Per-nucleotide binary logits (input width doubles with a biLSTM encoder).
        self.nuc_output = lib.ops.Linear.linear('bilstm_nuc_output',
                                                self.units * 2 if self.lstm_encoder else self.units, 2,
                                                self.nuc_embedding)
        # Per-sequence (graph-level) binary logits.
        self.output = lib.ops.Linear.linear('OutputMapping', output.get_shape().as_list()[-1],
                                            2, output, variables_on_cpu=False)  # categorical logits
    def _loss(self):
        """Define the graph-level and nucleotide-level losses and mix them.

        Builds:
          - ``self.prediction`` / ``self.nuc_prediction``: softmax probabilities
            at the sequence (graph) and nucleotide level, respectively
          - ``self.graph_cost``: cross-entropy on the per-sequence label
          - ``self.nuc_cost``: masked, length-normalized per-nucleotide loss
          - ``self.cost``: convex combination weighted by ``self.mixing_ratio``
        """
        self.prediction = tf.nn.softmax(self.output)
        self.nuc_prediction = tf.nn.softmax(self.nuc_output)
        # graph level loss
        # A sequence counts as positive if any nucleotide is labeled 1,
        # hence the reduce_max over the temporal axis.
        self.graph_cost = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(
                logits=self.output,  # reduce along the RNA sequence to a graph label
                labels=tf.one_hot(tf.reduce_max(self.labels, axis=1), depth=2),
            ))
        # nucleotide level loss
        # dummies are padded to the front...
        # mask_offset holds the per-example amount of front padding, so the
        # mask is 0 over padding and 1 over real nucleotides.
        self.mask = 1.0 - tf.sequence_mask(self.mask_offset, maxlen=self.max_len, dtype=tf.float32)
        if self.use_ghm:
            # gradient-harmonizing-mechanism reweighting of the per-nucleotide
            # loss, normalized by the total number of real (unpadded) bases.
            # NOTE(review): unlike the else-branch, the padding mask enters
            # only through get_ghm_weights here — confirm the GHM weights
            # actually zero out padded positions.
            self.nuc_cost = tf.reduce_sum(
                get_ghm_weights(self.nuc_prediction, self.labels, self.mask,
                                bins=10, alpha=0.75, name='GHM_NUC_EMB') * \
                tf.nn.softmax_cross_entropy_with_logits(
                    logits=self.nuc_output,
                    labels=tf.one_hot(self.labels, depth=2),
                ) / tf.cast(tf.reduce_sum(self.segment_length), tf.float32)
            )
        else:
            # masked cross-entropy averaged over the true (unpadded) lengths
            self.nuc_cost = tf.reduce_sum(
                self.mask * \
                tf.nn.softmax_cross_entropy_with_logits(
                    logits=self.nuc_output,
                    labels=tf.one_hot(self.labels, depth=2),
                )) / tf.cast(tf.reduce_sum(self.segment_length), tf.float32)
        self.cost = self.mixing_ratio * self.graph_cost + (1. - self.mixing_ratio) * self.nuc_cost
def _train(self):
self.gv = self.optimizer.compute_gradients(self.cost,
var_list=[var for var in tf.trainable_variables()],
colocate_gradients_with_ops=True)
    def _merge(self):
        """Define streaming evaluation metrics and an input-gradient tensor."""
        # If the example contains a binding site: more global
        # Sequence-level accuracy: the label is the max over nucleotides
        # (any binding site present); the prediction is the argmax of the
        # graph-level softmax.
        self.seq_acc_val, self.seq_acc_update_op = tf.metrics.accuracy(
            labels=tf.reduce_max(self.labels, axis=-1),
            predictions=tf.to_int32(tf.argmax(self.prediction, axis=-1)),
        )
        # nucleotide level accuracy of containing a binding site
        # (a sequence is predicted positive if any nucleotide is argmax'd to 1)
        self.nuc_acc_val, self.nuc_acc_update_op = tf.metrics.accuracy(
            labels=tf.reduce_max(self.labels, axis=-1),
            predictions=tf.to_int32(tf.reduce_max(
                tf.argmax(self.nuc_prediction, axis=-1), axis=-1)),
        )
        self.acc_val = [self.seq_acc_val, self.nuc_acc_val]
        self.acc_update_op = [self.seq_acc_update_op, self.nuc_acc_update_op]
        # graph level ROC AUC
        self.auc_val, self.auc_update_op = tf.metrics.auc(
            labels=tf.reduce_max(self.labels, axis=-1),
            predictions=self.prediction[:, 1],
        )
        # Gradient of the positive-class probability w.r.t. the input nodes
        # (used downstream for saliency/attribution over nucleotides).
        self.g_nodes = tf.gradients(self.prediction[:, 1], self.node_tensor)[0]
def _init_session(self):
gpu_options = tf.GPUOptions()
gpu_options.per_process_gpu_memory_fraction = 0.2
if type(self.gpu_device) is list:
gpu_options.visible_device_list = ','.join([device[-1] for device in self.gpu_device])
else:
gpu_options.visible_device_list = self.gpu_device[-1]
self.sess = tf.Session(graph=self.g, config=tf.ConfigProto(gpu_options=gpu_options))
self.sess.run(self.init)
self.sess.run(self.local_init)
def reset_session(self):
del self.saver
with self.g.as_default():
self.saver = tf.train.Saver(max_to_keep=5)
self.sess.run(self.init)
self.sess.run(self.local_init)
lib.plot.reset()
@classmethod
def indexing_iterable(cls, iterable, idx):
return [item[idx] for item in iterable]
@classmethod
def random_crop(cls, node_tensor, raw_seq, y, pos_read_retention_rate=0.5):
m_seq, m_label, m_sg, m_data, m_row_col = [], [], [], [], []
for seq, _raw_seq, label in zip(node_tensor, raw_seq, y):
if np.max(label) == 0:
# negative sequence
pseudo_label = (np.array(list(_raw_seq)) <= 'Z').astype(np.int32)
pos_idx = np.where(pseudo_label == 1)[0]
else:
pos_idx = np.where(label == 1)[0]
# keep more than 3/4 of the sequence (length), and random start
read_length = len(pos_idx)
rate = min(max(pos_read_retention_rate, np.random.rand()), 0.9)
winsize = int(rate * read_length)
surplus = read_length - winsize + 1
start_idx = np.random.choice(range(int(surplus / 4), math.ceil(surplus * 3 / 4)))
label = [0] * (pos_idx[0] + start_idx) + [1] * winsize + [0] * \
(len(seq) - winsize - start_idx - pos_idx[0])
left_truncate = int(np.random.rand() * pos_idx[0])
right_truncate = int(np.random.rand() * (len(seq) - pos_idx[-1] - 1))
if not right_truncate > 0:
right_truncate = -len(seq)
seq = seq[left_truncate: -right_truncate]
label = label[left_truncate: -right_truncate]
m_seq.append(seq)
m_sg.append(len(seq))
m_label.append(label)
return np.array(m_seq), np.array(m_sg), np.array(m_label)
def fit(self, X, y, epochs, batch_size, output_dir, logging=False, epoch_to_start=0, random_crop=False):
checkpoints_dir = os.path.join(output_dir, 'checkpoints/')
if not os.path.exists(checkpoints_dir):
os.makedirs(checkpoints_dir)
# split validation set
row_sum = np.array(list(map(lambda label: np.sum(label), y)))
pos_idx, neg_idx = np.where(row_sum > 0)[0], np.where(row_sum == 0)[0]
dev_idx = np.array(list(np.random.choice(pos_idx, int(len(pos_idx) * 0.1), False)) + \
list(np.random.choice(neg_idx, int(len(neg_idx) * 0.1), False)))
train_idx = np.delete(np.arange(len(y)), dev_idx)
dev_data = self.indexing_iterable(X, dev_idx)
dev_targets = y[dev_idx]
X = self.indexing_iterable(X, train_idx)
train_targets = y[train_idx]
size_train = train_targets.shape[0]
iters_per_epoch = size_train // batch_size + (0 if size_train % batch_size == 0 else 1)
best_dev_cost = np.inf
lib.plot.set_output_dir(output_dir)
if logging:
logger = lib.logger.CSVLogger('run.csv', output_dir,
['epoch', 'cost', 'graph_cost', 'nuc_cost',
'seq_acc', 'nuc_acc', 'auc',
'dev_cost', 'dev_graph_cost', 'dev_nuc_cost',
'dev_seq_acc', 'dev_nuc_acc', 'dev_auc'])
for epoch in range(epoch_to_start, epochs):
permute = np.random.permutation(size_train)
node_tensor, segment_length, raw_seq = self.indexing_iterable(X, permute)
y = train_targets[permute]
if random_crop:
# augmentation
node_tensor, segment_length, y = \
self.random_crop(node_tensor, raw_seq, y)
prepro_time = 0.
training_time = 0.
for i in range(iters_per_epoch):
prepro_start = time.time()
_node_tensor, _segment, _labels \
= node_tensor[i * batch_size: (i + 1) * batch_size], \
segment_length[i * batch_size: (i + 1) * batch_size], \
y[i * batch_size: (i + 1) * batch_size]
_max_len = max(_segment)
_labels = np.array([np.pad(label, [_max_len - len(label), 0], mode='constant') for label in _labels])
feed_dict = {
self.node_input_ph: np.concatenate(_node_tensor, axis=0),
self.labels: _labels,
self.max_len: _max_len,
self.segment_length: _segment,
self.global_step: i,
self.hf_iters_per_epoch: iters_per_epoch // 2,
self.is_training_ph: True
}
prepro_end = time.time()
prepro_time += (prepro_end - prepro_start)
self.sess.run(self.train_op, feed_dict)
training_time += (time.time() - prepro_end)
print('preprocessing time: %.4f, training time: %.4f' % (prepro_time / (i + 1), training_time / (i + 1)))
train_cost, train_acc, train_auc = self.evaluate(X, train_targets, batch_size)
lib.plot.plot('train_cost', train_cost[0])
lib.plot.plot('train_graph_cost', train_cost[1])
lib.plot.plot('train_nuc_cost', train_cost[2])
lib.plot.plot('train_seq_acc', train_acc[0])
lib.plot.plot('train_nuc_acc', train_acc[1])
lib.plot.plot('train_auc', train_auc)
dev_cost, dev_acc, dev_auc = self.evaluate(dev_data, dev_targets, batch_size)
lib.plot.plot('dev_cost', dev_cost[0])
lib.plot.plot('dev_graph_cost', dev_cost[1])
lib.plot.plot('dev_nuc_cost', dev_cost[2])
lib.plot.plot('dev_seq_acc', dev_acc[0])
lib.plot.plot('dev_nuc_acc', dev_acc[1])
lib.plot.plot('dev_auc', dev_auc)
logger.update_with_dict({
'epoch': epoch, 'cost': train_cost[0], 'graph_cost': train_cost[1],
'nuc_cost': train_cost[2], 'seq_acc': train_acc[0], 'nuc_acc': train_acc[1],
'auc': train_auc,
'dev_cost': dev_cost[0], 'dev_graph_cost': dev_cost[1],
'dev_nuc_cost': dev_cost[2], 'dev_seq_acc': dev_acc[0],
'dev_nuc_acc': dev_acc[1], 'dev_auc': dev_auc,
})
lib.plot.flush()
lib.plot.tick()
if dev_cost[0] < best_dev_cost and epoch - epoch_to_start >= 10: # unstable loss in the beginning
best_dev_cost = dev_cost[0]
save_path = self.saver.save(self.sess, checkpoints_dir, global_step=epoch)
print('Validation sample cost improved. Saved to path %s\n' % (save_path), flush=True)
else:
print('\n', flush=True)
print('Loading best weights %s' % (save_path), flush=True)
self.saver.restore(self.sess, save_path)
if logging:
logger.close()
def evaluate(self, X, y, batch_size, random_crop=False):
node_tensor, segment_length, raw_seq = X
if random_crop:
# augmentation
node_tensor, segment_length, y = \
self.random_crop(node_tensor, raw_seq, y)
all_cost = 0.
all_graph_cost = 0.
all_bilstm_nuc_cost = 0.
iters_per_epoch = len(node_tensor) // batch_size + (0 if len(node_tensor) % batch_size == 0 else 1)
for i in range(iters_per_epoch):
_node_tensor, _segment, _labels \
= node_tensor[i * batch_size: (i + 1) * batch_size], \
segment_length[i * batch_size: (i + 1) * batch_size], \
y[i * batch_size: (i + 1) * batch_size]
_max_len = max(_segment)
_labels = np.array([np.pad(label, [_max_len - | |
# Copyright (c) 2019 China Telecom Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import uuidutils
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from octavia.common import constants
import octavia.common.context
from octavia.db import api as db_api
from octavia.tests.functional.api.v2 import base
class TestDistributor(base.BaseAPITest):
root_tag = 'distributor'
root_tag_list = 'distributors'
root_tag_links = 'distributors_links'
    def setUp(self):
        """Delegate to the base API test setup; no extra fixtures are needed."""
        super(TestDistributor, self).setUp()
def _assert_request_matches_response(self, req, resp, **optionals):
self.assertTrue(uuidutils.is_uuid_like(resp.get('id')))
req_description = req.get('description')
self.assertEqual(req.get('name'), resp.get('name'))
if not req_description:
self.assertEqual('', resp.get('description'))
else:
self.assertEqual(req.get('description'), resp.get('description'))
self.assertEqual(req.get('distributor_driver'),
resp.get('distributor_driver'))
self.assertEqual(req.get('frontend_subnet'),
resp.get('frontend_subnet'))
self.assertEqual(req.get('config_data'), resp.get('config_data'))
self.assertEqual(constants.PENDING_CREATE,
resp.get('provisioning_status'))
self.assertEqual(constants.OFFLINE, resp.get('operating_status'))
self.assertEqual(req.get('admin_state_up', True),
resp.get('admin_state_up'))
for key, value in optionals.items():
self.assertEqual(value, req.get(key))
def test_empty_list(self):
response = self.get(self.DISTRIBUTORS_PATH)
api_list = response.json.get(self.root_tag_list)
self.assertEqual([], api_list)
    def test_create(self, **optionals):
        """POST a valid distributor and verify the response.

        Returns the created distributor so other tests can reuse this as a
        helper with extra fields passed through ``optionals``.
        """
        distributor_json = {
            'name': 'test1',
            'frontend_subnet': uuidutils.generate_uuid(),
            'distributor_driver': 'noop',
            'config_data': '{}'}
        distributor_json.update(optionals)
        body = self._build_body(distributor_json)
        response = self.post(self.DISTRIBUTORS_PATH, body)
        api_distributor = response.json.get(self.root_tag)
        self._assert_request_matches_response(distributor_json,
                                              api_distributor)
        return api_distributor
    def test_create_with_invalid_driver(self):
        """An unknown distributor_driver is rejected with 400 and a clear fault."""
        distributor_json = {
            'name': 'test2',
            'frontend_subnet': uuidutils.generate_uuid(),
            'distributor_driver': 'fake_noop',
            'config_data': '{}'}
        body = self._build_body(distributor_json)
        response = self.post(self.DISTRIBUTORS_PATH, body, status=400)
        err_msg = ("fake_noop is not a valid option for distributor_driver")
        self.assertEqual(err_msg, response.json.get('faultstring'))
    def test_create_with_missing_name(self):
        """Omitting the mandatory name field yields 400 with a field error."""
        distributor_json = {
            'frontend_subnet': uuidutils.generate_uuid(),
            'distributor_driver': 'noop',
            'config_data': '{}'}
        body = self._build_body(distributor_json)
        response = self.post(self.DISTRIBUTORS_PATH, body, status=400)
        err_msg = ("Invalid input for field/attribute name. Value: "
                   "'None'. Mandatory field missing.")
        self.assertEqual(err_msg, response.json.get('faultstring'))
    def test_create_with_missing_frontend_subnet(self):
        """Omitting the mandatory frontend_subnet yields 400 with a field error."""
        distributor_json = {
            'name': 'test1',
            'distributor_driver': 'noop',
            'config_data': '{}'}
        body = self._build_body(distributor_json)
        response = self.post(self.DISTRIBUTORS_PATH, body, status=400)
        err_msg = ("Invalid input for field/attribute frontend_subnet. Value: "
                   "'None'. Mandatory field missing.")
        self.assertEqual(err_msg, response.json.get('faultstring'))
    def test_create_with_missing_distributor_driver(self):
        """Omitting the mandatory distributor_driver yields 400 with a field error."""
        distributor_json = {
            'name': 'test1',
            'frontend_subnet': uuidutils.generate_uuid(),
            'config_data': '{}'}
        body = self._build_body(distributor_json)
        response = self.post(self.DISTRIBUTORS_PATH, body, status=400)
        err_msg = ("Invalid input for field/attribute distributor_driver. "
                   "Value: 'None'. Mandatory field missing.")
        self.assertEqual(err_msg, response.json.get('faultstring'))
    def test_create_with_missing_config_data(self):
        """Omitting the mandatory config_data yields 400 with a field error."""
        distributor_json = {
            'name': 'test1',
            'frontend_subnet': uuidutils.generate_uuid(),
            'distributor_driver': 'noop'}
        body = self._build_body(distributor_json)
        response = self.post(self.DISTRIBUTORS_PATH, body, status=400)
        err_msg = ("Invalid input for field/attribute config_data. Value: "
                   "'None'. Mandatory field missing.")
        self.assertEqual(err_msg, response.json.get('faultstring'))
def test_create_with_long_name(self):
distributor_json = {
'name': 'n' * 256,
'frontend_subnet': uuidutils.generate_uuid(),
'distributor_driver': 'noop',
'config_data': '{}'}
body = self._build_body(distributor_json)
self.post(self.DISTRIBUTORS_PATH, body, status=400)
    def test_create_with_long_description(self):
        """A description longer than 255 characters is rejected with 400."""
        distributor_json = {
            'name': 'test-distributor',
            'description': 'n' * 256,
            'frontend_subnet': uuidutils.generate_uuid(),
            'distributor_driver': 'noop',
            'config_data': '{}'}
        body = self._build_body(distributor_json)
        self.post(self.DISTRIBUTORS_PATH, body, status=400)
    def test_create_duplicate_names(self):
        """Creating a second distributor with an existing name yields 409."""
        distributor1 = self.create_distributor(
            'name', 'noop', uuidutils.generate_uuid(), '{}')
        self.assertTrue(uuidutils.is_uuid_like(distributor1.get('id')))
        distributor_json = {
            'name': 'name',
            'frontend_subnet': uuidutils.generate_uuid(),
            'distributor_driver': 'noop',
            'config_data': '{}'}
        body = self._build_body(distributor_json)
        response = self.post(self.DISTRIBUTORS_PATH, body, status=409)
        err_msg = "A distributor of name already exists."
        self.assertEqual(err_msg, response.json.get('faultstring'))
    def test_create_authorized(self):
        """POST succeeds for an authorized admin when keystone auth is enforced."""
        distributor_json = {
            'name': 'test1',
            'frontend_subnet': uuidutils.generate_uuid(),
            'distributor_driver': 'noop',
            'config_data': '{}'}
        body = self._build_body(distributor_json)
        # temporarily switch auth on; restored after the request below
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
        project_id = uuidutils.generate_uuid()
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               project_id):
            # credentials presented to the policy engine for this request
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': True,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                response = self.post(self.DISTRIBUTORS_PATH, body)
                api_flavor = response.json.get(self.root_tag)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self._assert_request_matches_response(distributor_json, api_flavor)
    def test_create_not_authorized(self):
        """POST is denied with 403 when auth is enforced without credentials."""
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
        distributor_json = {
            'name': 'test1',
            'frontend_subnet': uuidutils.generate_uuid(),
            'distributor_driver': 'noop',
            'config_data': '{}'}
        body = self._build_body(distributor_json)
        response = self.post(self.DISTRIBUTORS_PATH, body, status=403)
        api_flavor = response.json
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, api_flavor)
    def test_create_l3_distributor(self):
        """The l3 driver accepts a config with numeric 'as' and a router_id."""
        distributor_json = {
            'name': 'test1',
            'frontend_subnet': uuidutils.generate_uuid(),
            'distributor_driver': 'l3',
            'config_data': '{"as": 62000, "router_id": "2.2.2.2"}'}
        body = self._build_body(distributor_json)
        response = self.post(self.DISTRIBUTORS_PATH, body)
        api_distributor = response.json.get(self.root_tag)
        self._assert_request_matches_response(distributor_json,
                                              api_distributor)
    def test_create_l3_distributor_with_invalid_options(self):
        """The l3 driver rejects config whose 'as' is not a number with 501."""
        distributor_json = {
            'name': 'test1',
            'frontend_subnet': uuidutils.generate_uuid(),
            'distributor_driver': 'l3',
            'config_data': '{"as": "dddd", "router_id": "2.2.2.2"}'}
        body = self._build_body(distributor_json)
        response = self.post(self.DISTRIBUTORS_PATH, body, status=501)
        # NOTE(review): the expected message contains a u'' repr, which ties
        # this assertion to Python 2 string formatting.
        err_msg = ("The \'l3\' distributor does not support a requested "
                   "option: as u\'dddd\' is not of type \'number\'")
        self.assertEqual(err_msg, response.json.get('faultstring'))
def test_get(self):
distributor = self.create_distributor(
'name', 'noop', uuidutils.generate_uuid(), '{}')
self.assertTrue(uuidutils.is_uuid_like(distributor.get('id')))
response = self.get(
self.DISTRIBUTOR_PATH.format(
distributor_id=distributor.get('id'))).json.get(self.root_tag)
self.assertEqual('name', response.get('name'))
self.assertEqual(distributor.get('frontend_subnet'),
response.get('frontend_subnet'))
self.assertEqual(distributor.get('distributor_driver'),
response.get('distributor_driver'))
self.assertEqual(distributor.get('config_data'),
response.get('config_data'))
self.assertTrue(response.get('admin_state_up'))
def test_get_one_fields_filter(self):
distributor = self.create_distributor(
'name', 'noop', uuidutils.generate_uuid(), '{}')
self.assertTrue(uuidutils.is_uuid_like(distributor.get('id')))
response = self.get(
self.DISTRIBUTOR_PATH.format(
distributor_id=distributor.get('id')),
params={
'fields': ['id', 'frontend_subnet']}).json.get(self.root_tag)
self.assertEqual(distributor.get('id'), response.get('id'))
self.assertEqual(distributor.get('frontend_subnet'),
response.get('frontend_subnet'))
self.assertIn(u'id', response)
self.assertIn(u'frontend_subnet', response)
self.assertNotIn(u'name', response)
self.assertNotIn(u'description', response)
self.assertNotIn(u'distributor_drvier', response)
    def test_get_authorized(self):
        """GET succeeds for a non-admin member of the object's project."""
        distributor = self.create_distributor(
            'name', 'noop', uuidutils.generate_uuid(), '{}')
        self.assertTrue(uuidutils.is_uuid_like(distributor.get('id')))
        # temporarily switch auth on; restored after the request below
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
        project_id = uuidutils.generate_uuid()
        with mock.patch.object(octavia.common.context.Context, 'project_id',
                               project_id):
            # member credentials (is_admin False) presented to the policy engine
            override_credentials = {
                'service_user_id': None,
                'user_domain_id': None,
                'is_admin_project': True,
                'service_project_domain_id': None,
                'service_project_id': None,
                'roles': ['load-balancer_member'],
                'user_id': None,
                'is_admin': False,
                'service_user_domain_id': None,
                'project_domain_id': None,
                'service_roles': [],
                'project_id': project_id}
            with mock.patch(
                    "oslo_context.context.RequestContext.to_policy_values",
                    return_value=override_credentials):
                response = self.get(
                    self.DISTRIBUTOR_PATH.format(
                        distributor_id=distributor.get(
                            'id'))).json.get(self.root_tag)
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual('name', response.get('name'))
        self.assertEqual('', response.get('description'))
        self.assertEqual(distributor.get('id'), response.get('id'))
        self.assertEqual(distributor.get('frontend_subnet'),
                         response.get('frontend_subnet'))
        self.assertEqual(distributor.get('distributor_driver'),
                         response.get('distributor_driver'))
        self.assertEqual(distributor.get('config_data'),
                         response.get('config_data'))
        self.assertTrue(response.get('admin_state_up'))
def test_get_not_authorized(self):
distributor = self.create_distributor(
'name', 'noop', uuidutils.generate_uuid(), '{}')
self.assertTrue(uuidutils.is_uuid_like(distributor.get('id')))
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
response = self.get(self.DISTRIBUTOR_PATH.format(
distributor_id=distributor.get('id')), status=403).json
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
self.assertEqual(self.NOT_AUTHORIZED_BODY, response)
    def test_get_all(self):
        """Listing returns every created distributor with all attributes."""
        distributor1 = self.create_distributor(
            'name1', 'noop', 'd21bf20d-c323-4004-bf67-f90591ceced9',
            '{}', **{u'description': u'description'})
        self.assertTrue(uuidutils.is_uuid_like(distributor1.get('id')))
        # full expected representation of the first distributor
        ref_distributor_1 = {
            u'description': u'description', u'admin_state_up': True,
            u'frontend_subnet': u'd21bf20d-c323-4004-bf67-f90591ceced9',
            u'id': distributor1.get('id'), u'config_data': u'{}',
            u'provisioning_status': u'PENDING_CREATE',
            u'operating_status': u'OFFLINE',
            u'distributor_driver': 'noop', u'name': u'name1'}
        distributor2 = self.create_distributor(
            'name2', 'noop', '2a1bf24c-6b23-8a04-5b67-f90591cece10', '{}')
        self.assertTrue(uuidutils.is_uuid_like(distributor2.get('id')))
        ref_distributor_2 = {
            u'provisioning_status': u'PENDING_CREATE',
            u'distributor_driver': u'noop', u'config_data': u'{}',
            u'frontend_subnet': u'2a1bf24c-6b23-8a04-5b67-f90591cece10',
            u'name': u'name2', u'operating_status': u'OFFLINE',
            u'description': u'', u'admin_state_up': True,
            u'id': distributor2.get('id')}
        response = self.get(self.DISTRIBUTORS_PATH)
        api_list = response.json.get(self.root_tag_list)
        self.assertEqual(2, len(api_list))
        self.assertIn(ref_distributor_1, api_list)
        self.assertIn(ref_distributor_2, api_list)
def test_get_all_fields_filter(self):
distributor1 = self.create_distributor(
'name1', 'noop', 'd21bf20d-c323-4004-bf67-f90591ceced9',
'{}', **{u'description': u'description'})
self.assertTrue(uuidutils.is_uuid_like(distributor1.get('id')))
distributor2 = self.create_distributor(
'name2', 'noop', '2a1bf24c-6b23-8a04-5b67-f90591cece10', '{}')
self.assertTrue(uuidutils.is_uuid_like(distributor2.get('id')))
response = self.get(self.DISTRIBUTORS_PATH, params={
'fields': ['id', 'name']})
api_list = response.json.get(self.root_tag_list)
self.assertEqual(2, len(api_list))
for distributor in api_list:
self.assertIn(u'id', distributor)
self.assertIn(u'name', distributor)
self.assertNotIn(u'frontend_subnet', distributor)
self.assertNotIn(u'distributor_driver', distributor)
self.assertNotIn(u'admin_state_up', distributor)
def test_get_all_authorized(self):
distributor1 = self.create_distributor(
'name1', 'noop', 'd21bf20d-c323-4004-bf67-f90591ceced9',
'{}', **{u'description': u'description'})
self.assertTrue(uuidutils.is_uuid_like(distributor1.get('id')))
distributor2 = self.create_distributor(
'name2', 'noop', '2a1bf24c-6b23-8a04-5b67-f90591cece10', '{}')
self.assertTrue(uuidutils.is_uuid_like(distributor2.get('id')))
response = self.get(self.DISTRIBUTORS_PATH)
self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
project_id = uuidutils.generate_uuid()
with mock.patch.object(octavia.common.context.Context, 'project_id',
project_id):
override_credentials = {
'service_user_id': None,
'user_domain_id': None,
'is_admin_project': True,
'service_project_domain_id': None,
'service_project_id': None,
'roles': ['load-balancer_member'],
'user_id': None,
'is_admin': False,
'service_user_domain_id': None,
'project_domain_id': None,
'service_roles': [],
'project_id': project_id}
with mock.patch(
"oslo_context.context.RequestContext.to_policy_values",
return_value=override_credentials):
api_list = response.json.get(self.root_tag_list)
self.conf.config(group='api_settings', auth_strategy=auth_strategy)
self.assertEqual(2, len(api_list))
    def test_get_all_not_authorized(self):
        """Listing is denied with 403 when auth is enforced without credentials."""
        distributor1 = self.create_distributor(
            'name1', 'noop', 'd21bf20d-c323-4004-bf67-f90591ceced9',
            '{}', **{u'description': u'description'})
        self.assertTrue(uuidutils.is_uuid_like(distributor1.get('id')))
        distributor2 = self.create_distributor(
            'name2', 'noop', '2a1bf24c-6b23-8a04-5b67-f90591cece10', '{}')
        self.assertTrue(uuidutils.is_uuid_like(distributor2.get('id')))
        self.conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        auth_strategy = self.conf.conf.api_settings.get('auth_strategy')
        self.conf.config(group='api_settings', auth_strategy=constants.TESTING)
        response = self.get(self.DISTRIBUTORS_PATH, status=403).json
        self.conf.config(group='api_settings', auth_strategy=auth_strategy)
        self.assertEqual(self.NOT_AUTHORIZED_BODY, response)
    def test_update(self):
        """Name and description can be updated once the distributor is ACTIVE."""
        distributor_json = {
            'name': 'test1',
            'frontend_subnet': uuidutils.generate_uuid(),
            'distributor_driver': 'noop',
            'config_data': '{}'}
        body = self._build_body(distributor_json)
        response = self.post(self.DISTRIBUTORS_PATH, body)
        api_distributor = response.json.get(self.root_tag)
        distributor_id = api_distributor.get('id')
        self._assert_request_matches_response(distributor_json,
                                              api_distributor)
        # move the object out of PENDING_CREATE so the PUT is allowed
        self.set_object_status(self.distributor_repo, distributor_id)
        distributor_json = {
            'name': 'test2',
            'description': "desc test2"}
        body = self._build_body(distributor_json)
        response = self.put(self.DISTRIBUTOR_PATH.format(
            distributor_id=distributor_id), body)
        updated_distributor = self.get(self.DISTRIBUTOR_PATH.format(
            distributor_id=distributor_id)).json.get(self.root_tag)
        self.assertEqual('test2', updated_distributor['name'])
        self.assertEqual('desc test2', updated_distributor['description'])
        self.assertEqual(distributor_id, updated_distributor['id'])
    def test_update_l3_distributor(self):
        """An l3 distributor's config_data can be updated with valid options."""
        distributor_json = {
            'name': 'test1',
            'frontend_subnet': uuidutils.generate_uuid(),
            'distributor_driver': 'l3',
            'config_data': '{"as": 6235, "router_id": "6.6.6.6"}'}
        body = self._build_body(distributor_json)
        response = self.post(self.DISTRIBUTORS_PATH, body)
        api_distributor = response.json.get(self.root_tag)
        distributor_id = api_distributor.get('id')
        self._assert_request_matches_response(distributor_json,
                                              api_distributor)
        # move the object out of PENDING_CREATE so the PUT is allowed
        self.set_object_status(self.distributor_repo, distributor_id)
        distributor_json = {
            'name': 'test2',
            'description': "desc test2",
            'config_data': '{"as": 6200, "router_id": "6.6.6.6"}'}
        body = self._build_body(distributor_json)
        response = self.put(self.DISTRIBUTOR_PATH.format(
            distributor_id=distributor_id), body)
        updated_distributor = self.get(self.DISTRIBUTOR_PATH.format(
            distributor_id=distributor_id)).json.get(self.root_tag)
        self.assertEqual('test2', updated_distributor['name'])
        self.assertEqual('desc test2', updated_distributor['description'])
        self.assertEqual('{"as": 6200, "router_id": "6.6.6.6"}',
                         updated_distributor['config_data'])
        self.assertEqual(distributor_id, updated_distributor['id'])
    def test_update_l3_distributor_with_invalid_options(self):
        """Updating an l3 distributor with config missing 'as' yields 501."""
        distributor_json = {
            'name': 'test1',
            'frontend_subnet': uuidutils.generate_uuid(),
            'distributor_driver': 'l3',
            'config_data': '{"as": 6235, "router_id": "6.6.6.6"}'}
        body = self._build_body(distributor_json)
        response = self.post(self.DISTRIBUTORS_PATH, body)
        api_distributor = response.json.get(self.root_tag)
        distributor_id = api_distributor.get('id')
        self._assert_request_matches_response(distributor_json,
                                              api_distributor)
        # move the object out of PENDING_CREATE so the PUT is allowed
        self.set_object_status(self.distributor_repo, distributor_id)
        distributor_json = {
            'name': 'test2',
            'description': "desc test2",
            'config_data': '{"router_id": "6.6.6.6"}'}
        body = self._build_body(distributor_json)
        response = self.put(self.DISTRIBUTOR_PATH.format(
            distributor_id=distributor_id), body, status=501)
        err_msg = ("The 'l3' distributor does not support a requested "
                   "option: 'as' is a required property")
        self.assertEqual(err_msg, response.json.get('faultstring'))
def test_update_with_config_data(self):
distributor_json = {
'name': 'test1',
'frontend_subnet': uuidutils.generate_uuid(),
'distributor_driver': 'noop',
'config_data': '{}'}
body = self._build_body(distributor_json)
response = self.post(self.DISTRIBUTORS_PATH, body)
api_distributor = response.json.get(self.root_tag)
distributor_id = api_distributor.get('id')
self._assert_request_matches_response(distributor_json,
api_distributor)
self.set_object_status(self.distributor_repo, distributor_id)
distributor_json = {
'config_data': '{"aaa": "bbb"}'}
body = self._build_body(distributor_json)
response = self.put(self.DISTRIBUTOR_PATH.format(
distributor_id=distributor_id), body)
updated_distributor = self.get(self.DISTRIBUTOR_PATH.format(
distributor_id=distributor_id)).json.get(self.root_tag)
self.assertEqual('{"aaa": "bbb"}', | |
# pynams/blocks.py
"""
Code for grouping 3 orthogonal Profiles into a single Block object.
"""
from __future__ import print_function, division, absolute_import
import pynams.styles as styles
from pynams.diffusion import models
from pynams import Spectrum
import numpy as np
import matplotlib.pyplot as plt
import lmfit
class Block():
def __init__(self,profiles=[], folder='', name='', get_peakfit=False,
time_seconds=None, sample=None, get_baselines=False,
initialWB=None, celsius=None):
"""
Sets up and checks new Block object.
- Check that profiles list contains a list of three (3) profiles
- Generate list of initial profiles
- Generate list of profile directions
- Verify three profile directions are orthogonal (['a', 'b', 'c'])
- Generate list of ray paths
- Verify three ray path directions are compatible with directions list
"""
if len(profiles) != 3:
print('keyword profiles must be a list of 3 pynams profiles')
print('If you only have 1 or 2, make a dummy profile')
return
self.profiles = profiles
self.folder = folder
self.name = name
self.time_seconds = time_seconds
self.sample = sample
self.peak_diffusivities = []
self.peak_diffusivities_errors = []
self.celsius = celsius
d = []
r = []
ip = []
L = []
for prof in self.profiles:
d.append(prof.direction)
r.append(prof.raypath)
L.append(prof.length_microns)
prof.time_seconds = self.time_seconds
if prof.sample is None and self.sample is not None:
prof.sample = self.sample
elif prof.sample is not None and self.sample is None:
self.sample = prof.sample
elif prof.sample != self.sample:
print('Warning: profile sample does not match wb sample')
if prof.positions_microns is None:
print('profile', prof.profile_name)
print('Needs positions_microns attribute')
if prof.initial_profile is None:
prof.initial_profile = prof
ip.append(prof.initial_profile)
if prof.spectra is None:
prof.make_spectra()
for spec in prof.spectra:
if self.sample is not None:
if prof.raypath == 'a':
meanthick = np.mean(self.sample.length_a_microns)
elif prof.raypath == 'b':
meanthick = np.mean(self.sample.length_b_microns)
elif prof.raypath == 'c':
meanthick = np.mean(self.sample.length_c_microns)
try:
spec.thickness_microns = meanthick
except NameError:
print('Unable to determine thicknesses from sample')
self.directions = d
self.raypaths = r
self.initial_profiles = ip
self.lengths = L
if get_peakfit is True:
for prof in self.profiles:
prof.get_peakfit()
if get_baselines is True:
self.get_baselines()
if initialWB is not None:
for idx, prof in self.profiles:
prof.initial_profile = initialWB.profiles[idx]
def get_peakfits(self, peak_ending='-peakfit.CSV'):
"""Get peakfit information for all profiles"""
for prof in self.profiles:
prof.get_peakfits(peak_ending=peak_ending)
def get_baselines(self, initial_too=True, folder=None, delim=',',
baseline_ending='-baseline.CSV',
print_confirmation=False):
"""Get baselines for all spectra in the block"""
if self.initial_profiles is None:
self.setupWB()
for prof in self.profiles:
for spectrum in prof.spectra:
spectrum.get_baseline(baseline_ending=baseline_ending,
folder=folder, delim=delim,
print_confirmation=print_confirmation)
if initial_too is True:
for prof in self.initial_profiles:
for spectrum in prof.spectra:
spectrum.get_baseline(baseline_ending=baseline_ending,
folder=folder, delim=delim,
print_confirmation=print_confirmation)
def plot_showbaselines(self):
    """Show the baseline plot for every spectrum in every profile."""
    all_spectra = (spec for profile in self.profiles
                   for spec in profile.spectra)
    for spectrum in all_spectra:
        spectrum.plot_showbaseline()
def plot_subtractbaselines(self):
    """Plot every spectrum in the block with its baseline subtracted,
    using each spectrum's default ``plot_subtractbaseline`` settings.
    """
    for profile in self.profiles:
        for spectrum in profile.spectra:
            spectrum.plot_subtractbaseline()
def make_areas(self, show_plot=False, printout_area=False, peak=None):
    """Collect areas and whole-block areas from all profiles.

    Calls ``make_wholeblock`` on each profile and gathers the
    resulting ``areas`` and ``wb_areas`` lists into ``self.areas``
    and ``self.wb_areas`` (one entry per profile).

    ``show_plot``, ``printout_area`` and ``peak`` are currently
    unused; they are kept for backward compatibility with existing
    callers.
    """
    self.areas = []
    self.wb_areas = []
    # The original looped with enumerate() but never used the index.
    for prof in self.profiles:
        prof.make_wholeblock()
        self.areas.append(prof.areas)
        self.wb_areas.append(prof.wb_areas)
def average_spectra(self):
    """Return a single Spectrum averaging all spectra in the block.

    Each profile is first averaged on its own; the per-profile
    average absorbances (per cm) are then averaged together.  The
    wavenumber axis is taken from the first spectrum of the last
    profile, so all spectra are assumed to share a common grid.
    """
    per_profile_abs = []
    for profile in self.profiles:
        per_profile_abs.append(profile.average_spectra().abs_full_cm)
    mean_abs = np.mean(per_profile_abs, axis=0)
    avespec = Spectrum(folder=None, fname=None)
    avespec.abs_full_cm = mean_abs
    avespec.abs_raw = mean_abs
    avespec.wn_full = profile.spectra[0].wn_full
    if self.name is not None:
        avespec.fname = self.name + '\naverage across all profiles'
    else:
        avespec.fname = 'average across all profiles'
    return avespec
def plot_spectra(self, profile_idx=None, show_baseline=True,
                 show_initial_ave=True,
                 show_final_ave=True, plot_all=False,
                 initial_and_final_together=False, style=styles.style_spectrum,
                 stylei=styles.style_initial, wn=None):
    """Plot all spectra in all (default) or one specified profile.

    :param profile_idx: index of a single profile to plot; when None
        the first three profiles are all plotted
    :param wn: optional wavenumber forwarded to each profile's
        plot_spectra
    Remaining keywords are passed through to profile.plot_spectra.
    """
    if profile_idx is None:
        proflist = [self.profiles[0], self.profiles[1], self.profiles[2]]
    else:
        proflist = [self.profiles[profile_idx]]
    for prof in proflist:
        print(prof.profile_name)
        # Bug fix: the original hard-coded wn=None here, so the wn
        # argument supplied by the caller was silently ignored.
        prof.plot_spectra(show_baseline=show_baseline,
                          show_initial_ave=show_initial_ave,
                          plot_all=plot_all,
                          show_final_ave=show_final_ave,
                          initial_and_final_together=initial_and_final_together,
                          style=style, stylei=stylei, wn=wn)
def plot_3panels_ave_spectra(self, peak_idx=None, peakwn=None,
                             top=1., high=4000, low=3000,
                             style=styles.style_spectrum_red,
                             stylei=styles.style_initial, show_raypaths=False,
                             figsize=(6., 4), show_initial=True,
                             legloc=5, label='Final', figax3=None):
    """
    Create three subplots showing average initial and final spectra in each
    direction.

    :param peak_idx: when given, marks the fitted peak position with a
        vertical red line in each panel
    :param figax3: existing array of 3 axes to draw into; when None a
        new figure is created and (figure, axes) is returned
    """
    if self.initial_profiles is None:
        self.setupWB()
    if figax3 is None:
        f, ax3 = plt.subplots(1, 3)
        f.set_size_inches(figsize)
        for idx, ax in enumerate(ax3[:3]):
            ax.set_xlim(high, low)
            ax.set_ylim(0., top)
            if show_raypaths is True:
                raypathstring = ''.join(('profile || ', self.profiles[idx].direction,
                                         '\nray path || ', self.profiles[idx].raypath))
                ax.text(3500, top-top*0.22, raypathstring,
                        backgroundcolor='w', horizontalalignment='center')
    else:
        ax3 = figax3
    for k in range(3):
        prof = self.profiles[k]
        avespec = prof.average_spectra()
        ax3[k].plot(avespec.wn_full, avespec.abs_full_cm, label=label,
                    **style)
        if peak_idx is not None:
            if self.profiles[k].peakpos is None:
                self.profiles[k].get_peakfit()
            peakpos = self.profiles[k].peakpos
            peakwn = peakpos[peak_idx]
            ax3[k].plot([peakwn, peakwn], [0, top], color='r')
        if show_initial is True:
            iprof = self.initial_profiles[k]
            if iprof is not None:
                initspec = iprof.average_spectra()
                ax3[k].plot(initspec.wn_full, initspec.abs_full_cm,
                            **stylei)
    plt.setp(ax3[1].get_yticklabels(), visible=False)
    plt.setp(ax3[2].get_yticklabels(), visible=False)
    ax3[1].set_xlabel('wavenumber (cm$^{-1}$)')
    ax3[0].set_ylabel('absorbance (cm$^{-1}$)')
    if show_initial is True:
        ax3[1].legend(loc=legloc)
    if figax3 is None:
        plt.tight_layout()
        plt.gcf().autofmt_xdate()
        tit = ' '.join(('Averaged profiles for', self.name))
        if peak_idx is not None:
            # Bug fix: the original called str.join(tit, ', peak at ', ...)
            # with several positional arguments, which raises TypeError
            # (str.join takes a single iterable).  Plain concatenation
            # builds the intended title.
            tit = tit + ', peak at ' + str(peakpos[peak_idx]) + '/cm'
        ax3[1].set_title(tit, zorder=100)  # must come after tight_layout
        plt.subplots_adjust(top=0.85, bottom=0.25)
    if figax3 is None:
        return f, ax3
def xy_picker(self, peak_idx=None, wholeblock=True, heights_instead=False,
              centered=True, unit='microns'):
    """Pick out and return x- and y-data for 3D plotting and diffusion.

    :param peak_idx: peak index for peak-specific data; bulk hydrogen
        when None
    :param wholeblock: use whole-block ratios rather than absolute
        values
    :param heights_instead: use peak heights rather than peak areas
        (peak-specific data only)
    :param centered: shift positions so 0 is the center of each
        sample length
    :param unit: kept for interface compatibility; not used here
    :return: (positions, y), one list entry per profile, or None when
        the required peak data are unavailable
    """
    positions = []
    y = []
    for prof in self.profiles:
        positions.append(prof.positions_microns)
        # Bulk hydrogen
        if peak_idx is None:
            # whole-block ratios
            if wholeblock is True:
                try:
                    y_to_add = prof.wb_areas
                except AttributeError:
                    prof.make_wholeblock(peakfit=False, show_plot=False)
                    y_to_add = prof.wb_areas
            # absolute areas
            else:
                try:
                    y_to_add = prof.areas
                except AttributeError:
                    # Bug fix: the original left y_to_add unset after
                    # make_areas(), causing a NameError below.
                    prof.make_areas()
                    y_to_add = prof.areas
        # Peak-specific
        else:
            if heights_instead is True:
                try:
                    y_to_add = prof.peak_heights[peak_idx]
                except AttributeError:
                    try:
                        # Bug fix: the original wrote into the missing
                        # prof.peak_heights attribute (raising again)
                        # and never set y_to_add; gather the heights
                        # from the individual spectra instead.
                        y_to_add = [spec.peak_heights[peak_idx]
                                    for spec in prof.spectra]
                    except AttributeError:
                        print('Need profile.peak_heights')
                        return
            else:
                try:
                    y_to_add = prof.peak_areas[peak_idx]
                except AttributeError:
                    try:
                        # Same fix as for peak heights above.
                        y_to_add = [spec.peak_areas[peak_idx]
                                    for spec in prof.spectra]
                    except AttributeError:
                        print('Need profile.peak_areas')
                        return
            if wholeblock is True:
                peak_wb, peakwn = prof.get_peak_wb_areas(peak_idx,
                                      heights_instead=heights_instead)
                y_to_add = peak_wb
        y.append(y_to_add)
    if centered is True:
        a = np.mean(self.profiles[0].sample.length_a_microns) / 2.
        b = np.mean(self.profiles[1].sample.length_b_microns) / 2.
        c = np.mean(self.profiles[2].sample.length_c_microns) / 2.
        halflengths = [a, b, c]
        for idx in range(3):
            positions[idx] = positions[idx] - halflengths[idx]
    return positions, y
def plot_areas_3panels(self, peak_idx=None,
axes3=None,
centered=False,
ytop=None,
heights_instead=False,
wholeblock=False,
xerror=0.,
yerror=None,
scale=1.,
pie=False,
styles3=[styles.style_points]*3,
unit='microns',
show_line_at_1=False,
show_data=True,
show_errorbars=True):
"""
Plots areas (default) or ratio of area to initial area
(wholeblock=True) for all 3 profiles.
Returns figure handle and a list of 3 axes handles (default) unless
axes3 is not equal to None.
Additional keywords are similar to those for profile.plot_areas
"""
if peak_idx is not None:
prof = self.profiles[0]
spec = prof.spectra[0]
try:
peakpos = self.peakpos
except AttributeError:
try:
peakpos = prof.peakpos
except AttributeError:
try:
peakpos = spec.peakpos
except AttributeError:
print('Need peak positions in peakpos attribute')
return
positions, y = self.xy_picker(peak_idx=peak_idx,
wholeblock=wholeblock,
heights_instead=heights_instead,
centered=centered, unit=unit)
if peak_idx is not None:
tit = ' '.join(('Peak at', str(peakpos[peak_idx]), '/cm'))
else:
tit = 'Bulk hydrogen'
if ytop is None:
z = []
for ynum in y:
if len(ynum) > 0:
z.append(max(ynum))
ytop = max(z) + 0.1*max(z)
if unit == 'microns':
lengths = self.lengths
elif unit == 'mm':
lengths = np.array(self.lengths) / 1000.
else:
print('unit must be microns (default) or mm')
return
if show_data is False:
positions = [[], [], []]
y = [[], [], []]
# Sent positions and areas to plotting command
if axes3 is not None:
styles.plot_3panels(positions, y, lengths, figaxis3=axes3,
styles3=styles3, ytop=ytop,
wholeblock=wholeblock,
show_line_at_1=show_line_at_1,
heights_instead=heights_instead,
use_errorbar=show_errorbars,
yerror=yerror, unit=unit, scale=scale,
xerror=xerror, centered=centered)
axes3[1].set_title(tit)
else:
fig, ax = styles.plot_3panels(positions, y, lengths,
styles3=styles3, ytop=ytop,
wholeblock=wholeblock,
show_line_at_1=show_line_at_1,
heights_instead=heights_instead,
use_errorbar=show_errorbars,
yerror=yerror, unit=unit,
xerror=xerror, centered=centered,
scale=scale)
ax[1].set_title(tit)
fig.set_size_inches(6.5, 3.)
fig.autofmt_xdate()
# add pie chart showing % of total height or area
if pie is True:
if peak_idx is None:
pass
else:
ax_pie = fig.add_subplot(339)
atot, htot = prof.get_area_total()
if heights_instead is False:
a = spec.peak_areas[peak_idx] / atot
size = [a, 1. - a]
tit = '% total area'
| |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['DashboardArgs', 'Dashboard']
@pulumi.input_type
class DashboardArgs:
    """Typed input bundle for constructing a QuickSight ``Dashboard``.

    NOTE(review): this class is emitted by the Pulumi SDK generator
    (see the file header); ``@pulumi.input_type`` introspects
    ``__init__`` and the property getters/setters below, so their
    names and shapes must stay in sync with the provider schema --
    do not hand-edit them independently.
    """
    def __init__(__self__, *,
                 aws_account_id: pulumi.Input[str],
                 dashboard_id: pulumi.Input[str],
                 source_entity: pulumi.Input['DashboardSourceEntityArgs'],
                 dashboard_publish_options: Optional[pulumi.Input['DashboardPublishOptionsArgs']] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 parameters: Optional[pulumi.Input['DashboardParametersArgs']] = None,
                 permissions: Optional[pulumi.Input[Sequence[pulumi.Input['DashboardResourcePermissionArgs']]]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input['DashboardTagArgs']]]] = None,
                 theme_arn: Optional[pulumi.Input[str]] = None,
                 version_description: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Dashboard resource.
        :param pulumi.Input[str] name: <p>The display name of the dashboard.</p>
        :param pulumi.Input[Sequence[pulumi.Input['DashboardResourcePermissionArgs']]] permissions: <p>A structure that contains the permissions of the dashboard. You can use this structure
               for granting permissions by providing a list of IAM action information for each
               principal ARN. </p>
               <p>To specify no permissions, omit the permissions list.</p>
        :param pulumi.Input[Sequence[pulumi.Input['DashboardTagArgs']]] tags: <p>Contains a map of the key-value pairs for the resource tag or tags assigned to the
               dashboard.</p>
        :param pulumi.Input[str] theme_arn: <p>The Amazon Resource Name (ARN) of the theme that is being used for this dashboard. If
               you add a value for this field, it overrides the value that is used in the source
               entity. The theme ARN must exist in the same AWS account where you create the
               dashboard.</p>
        :param pulumi.Input[str] version_description: <p>A description for the first version of the dashboard being created.</p>
        """
        # Required arguments are always recorded.
        pulumi.set(__self__, "aws_account_id", aws_account_id)
        pulumi.set(__self__, "dashboard_id", dashboard_id)
        pulumi.set(__self__, "source_entity", source_entity)
        # Optional arguments are only recorded when provided, so unset
        # values stay absent from the input map.
        if dashboard_publish_options is not None:
            pulumi.set(__self__, "dashboard_publish_options", dashboard_publish_options)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if parameters is not None:
            pulumi.set(__self__, "parameters", parameters)
        if permissions is not None:
            pulumi.set(__self__, "permissions", permissions)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if theme_arn is not None:
            pulumi.set(__self__, "theme_arn", theme_arn)
        if version_description is not None:
            pulumi.set(__self__, "version_description", version_description)
    # --- Required properties (set unconditionally in __init__) ---
    @property
    @pulumi.getter(name="awsAccountId")
    def aws_account_id(self) -> pulumi.Input[str]:
        """Required input; stored under the schema name ``awsAccountId``."""
        return pulumi.get(self, "aws_account_id")
    @aws_account_id.setter
    def aws_account_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "aws_account_id", value)
    @property
    @pulumi.getter(name="dashboardId")
    def dashboard_id(self) -> pulumi.Input[str]:
        """Required input; stored under the schema name ``dashboardId``."""
        return pulumi.get(self, "dashboard_id")
    @dashboard_id.setter
    def dashboard_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "dashboard_id", value)
    @property
    @pulumi.getter(name="sourceEntity")
    def source_entity(self) -> pulumi.Input['DashboardSourceEntityArgs']:
        """Required input; stored under the schema name ``sourceEntity``."""
        return pulumi.get(self, "source_entity")
    @source_entity.setter
    def source_entity(self, value: pulumi.Input['DashboardSourceEntityArgs']):
        pulumi.set(self, "source_entity", value)
    # --- Optional properties (only set when a value was provided) ---
    @property
    @pulumi.getter(name="dashboardPublishOptions")
    def dashboard_publish_options(self) -> Optional[pulumi.Input['DashboardPublishOptionsArgs']]:
        """Optional input; stored under the schema name ``dashboardPublishOptions``."""
        return pulumi.get(self, "dashboard_publish_options")
    @dashboard_publish_options.setter
    def dashboard_publish_options(self, value: Optional[pulumi.Input['DashboardPublishOptionsArgs']]):
        pulumi.set(self, "dashboard_publish_options", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        <p>The display name of the dashboard.</p>
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def parameters(self) -> Optional[pulumi.Input['DashboardParametersArgs']]:
        """Optional input; dashboard parameter overrides."""
        return pulumi.get(self, "parameters")
    @parameters.setter
    def parameters(self, value: Optional[pulumi.Input['DashboardParametersArgs']]):
        pulumi.set(self, "parameters", value)
    @property
    @pulumi.getter
    def permissions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DashboardResourcePermissionArgs']]]]:
        """
        <p>A structure that contains the permissions of the dashboard. You can use this structure
        for granting permissions by providing a list of IAM action information for each
        principal ARN. </p>
        <p>To specify no permissions, omit the permissions list.</p>
        """
        return pulumi.get(self, "permissions")
    @permissions.setter
    def permissions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DashboardResourcePermissionArgs']]]]):
        pulumi.set(self, "permissions", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DashboardTagArgs']]]]:
        """
        <p>Contains a map of the key-value pairs for the resource tag or tags assigned to the
        dashboard.</p>
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DashboardTagArgs']]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter(name="themeArn")
    def theme_arn(self) -> Optional[pulumi.Input[str]]:
        """
        <p>The Amazon Resource Name (ARN) of the theme that is being used for this dashboard. If
        you add a value for this field, it overrides the value that is used in the source
        entity. The theme ARN must exist in the same AWS account where you create the
        dashboard.</p>
        """
        return pulumi.get(self, "theme_arn")
    @theme_arn.setter
    def theme_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "theme_arn", value)
    @property
    @pulumi.getter(name="versionDescription")
    def version_description(self) -> Optional[pulumi.Input[str]]:
        """
        <p>A description for the first version of the dashboard being created.</p>
        """
        return pulumi.get(self, "version_description")
    @version_description.setter
    def version_description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version_description", value)
class Dashboard(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
aws_account_id: Optional[pulumi.Input[str]] = None,
dashboard_id: Optional[pulumi.Input[str]] = None,
dashboard_publish_options: Optional[pulumi.Input[pulumi.InputType['DashboardPublishOptionsArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[pulumi.InputType['DashboardParametersArgs']]] = None,
permissions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DashboardResourcePermissionArgs']]]]] = None,
source_entity: Optional[pulumi.Input[pulumi.InputType['DashboardSourceEntityArgs']]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DashboardTagArgs']]]]] = None,
theme_arn: Optional[pulumi.Input[str]] = None,
version_description: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Definition of the AWS::QuickSight::Dashboard Resource Type.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] name: <p>The display name of the dashboard.</p>
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DashboardResourcePermissionArgs']]]] permissions: <p>A structure that contains the permissions of the dashboard. You can use this structure
for granting permissions by providing a list of IAM action information for each
principal ARN. </p>
<p>To specify no permissions, omit the permissions list.</p>
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DashboardTagArgs']]]] tags: <p>Contains a map of the key-value pairs for the resource tag or tags assigned to the
dashboard.</p>
:param pulumi.Input[str] theme_arn: <p>The Amazon Resource Name (ARN) of the theme that is being used for this dashboard. If
you add a value for this field, it overrides the value that is used in the source
entity. The theme ARN must exist in the same AWS account where you create the
dashboard.</p>
:param pulumi.Input[str] version_description: <p>A description for the first version of the dashboard being created.</p>
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DashboardArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Definition of the AWS::QuickSight::Dashboard Resource Type.
:param str resource_name: The name of the resource.
:param DashboardArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DashboardArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
aws_account_id: Optional[pulumi.Input[str]] = None,
dashboard_id: Optional[pulumi.Input[str]] = None,
dashboard_publish_options: Optional[pulumi.Input[pulumi.InputType['DashboardPublishOptionsArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[pulumi.InputType['DashboardParametersArgs']]] = None,
permissions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DashboardResourcePermissionArgs']]]]] = None,
source_entity: Optional[pulumi.Input[pulumi.InputType['DashboardSourceEntityArgs']]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DashboardTagArgs']]]]] = None,
theme_arn: Optional[pulumi.Input[str]] = None,
version_description: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DashboardArgs.__new__(DashboardArgs)
if aws_account_id is None and not opts.urn:
raise TypeError("Missing required property 'aws_account_id'")
__props__.__dict__["aws_account_id"] = aws_account_id
if dashboard_id is None and not opts.urn:
raise TypeError("Missing required property 'dashboard_id'")
__props__.__dict__["dashboard_id"] = dashboard_id
__props__.__dict__["dashboard_publish_options"] = dashboard_publish_options
__props__.__dict__["name"] = name
__props__.__dict__["parameters"] = parameters
__props__.__dict__["permissions"] = permissions
if source_entity is None and not opts.urn:
raise TypeError("Missing required property 'source_entity'")
__props__.__dict__["source_entity"] = source_entity
__props__.__dict__["tags"] = tags
__props__.__dict__["theme_arn"] = theme_arn
__props__.__dict__["version_description"] = version_description
__props__.__dict__["arn"] = None
__props__.__dict__["created_time"] = None
__props__.__dict__["last_published_time"] = None
__props__.__dict__["last_updated_time"] = None
__props__.__dict__["version"] = None
super(Dashboard, __self__).__init__(
'aws-native:quicksight:Dashboard',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Dashboard':
"""
Get an existing Dashboard resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = DashboardArgs.__new__(DashboardArgs)
__props__.__dict__["arn"] = None
__props__.__dict__["aws_account_id"] = None
__props__.__dict__["created_time"] = None
__props__.__dict__["dashboard_id"] = None
__props__.__dict__["dashboard_publish_options"] = None
__props__.__dict__["last_published_time"] = None
__props__.__dict__["last_updated_time"] = None
__props__.__dict__["name"] = None
__props__.__dict__["parameters"] = None
__props__.__dict__["permissions"] = None
__props__.__dict__["source_entity"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["theme_arn"] = None
__props__.__dict__["version"] = None
__props__.__dict__["version_description"] = None
return Dashboard(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
<p>The Amazon Resource Name (ARN) of the resource.</p>
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="awsAccountId")
def aws_account_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "aws_account_id")
@property
@pulumi.getter(name="createdTime")
def created_time(self) -> pulumi.Output[str]:
"""
<p>The time that this dataset was created.</p>
"""
return pulumi.get(self, "created_time")
@property
@pulumi.getter(name="dashboardId")
def dashboard_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "dashboard_id")
@property
@pulumi.getter(name="dashboardPublishOptions")
def dashboard_publish_options(self) -> pulumi.Output[Optional['outputs.DashboardPublishOptions']]:
return pulumi.get(self, "dashboard_publish_options")
@property
@pulumi.getter(name="lastPublishedTime")
def last_published_time(self) -> pulumi.Output[str]:
"""
<p>The last time that this | |
hadoop-env has, if Ranger-KMS is installed, we use its values.
rangerKMSServerHosts = self.getHostsWithComponent(
"RANGER_KMS", "RANGER_KMS_SERVER", services, hosts)
if rangerKMSServerHosts is not None and len(rangerKMSServerHosts) > 0:
rangerKMSServerHostsArray = []
for rangeKMSServerHost in rangerKMSServerHosts:
rangerKMSServerHostsArray.append(
rangeKMSServerHost["Hosts"]["host_name"])
keyserverHostsString = ";".join(rangerKMSServerHostsArray)
if "kms-env" in services[
"configurations"] and "kms_port" in services[
"configurations"]["kms-env"]["properties"]:
keyserverPortString = services["configurations"]["kms-env"][
"properties"]["kms_port"]
if keyserverHostsString is not None and len(
keyserverHostsString.strip()) > 0:
urlScheme = "http"
if "ranger-kms-site" in services["configurations"] and \
"ranger.service.https.attrib.ssl.enabled" in services["configurations"]["ranger-kms-site"][
"properties"] and \
services["configurations"]["ranger-kms-site"]["properties"][
"ranger.service.https.attrib.ssl.enabled"].lower() == "true":
urlScheme = "https"
if keyserverPortString is None or len(
keyserverPortString.strip()) < 1:
keyserverPortString = ":9292"
else:
keyserverPortString = ":" + keyserverPortString.strip()
kmsPath = "kms://" + urlScheme + "@" + keyserverHostsString.strip(
) + keyserverPortString + "/kms"
putCoreSiteProperty("hadoop.security.key.provider.path", kmsPath)
putHdfsSiteProperty("dfs.encryption.key.provider.uri", kmsPath)
putHdfsSitePropertyAttribute = self.putPropertyAttribute(
configurations, "hdfs-site")
putCoreSitePropertyAttribute = self.putPropertyAttribute(
configurations, "core-site")
if not "RANGER_KMS" in servicesList:
putCoreSitePropertyAttribute('hadoop.security.key.provider.path',
'delete', 'true')
putHdfsSitePropertyAttribute('dfs.encryption.key.provider.uri',
'delete', 'true')
if "ranger-env" in services["configurations"] and "ranger-hdfs-plugin-properties" in services[
"configurations"] and \
"ranger-hdfs-plugin-enabled" in services["configurations"]["ranger-env"]["properties"]:
putHdfsRangerPluginProperty = self.putProperty(
configurations, "ranger-hdfs-plugin-properties", services)
rangerEnvHdfsPluginProperty = services["configurations"][
"ranger-env"]["properties"]["ranger-hdfs-plugin-enabled"]
putHdfsRangerPluginProperty("ranger-hdfs-plugin-enabled",
rangerEnvHdfsPluginProperty)
def recommendConfigurationsFromHDP23(self, configurations, clusterData,
                                     services, hosts):
    """Recommend HDFS configurations introduced in HDP 2.3.

    Wires the Ranger HDFS authorizer into hdfs-site when the Ranger
    HDFS plugin is enabled; otherwise marks the provider property for
    deletion.
    """
    put_property = self.putProperty(configurations, "hdfs-site", services)
    put_attribute = self.putPropertyAttribute(configurations, "hdfs-site")
    provider_key = 'dfs.namenode.inode.attributes.provider.class'
    svc_configs = services['configurations']
    plugin_known = ('ranger-hdfs-plugin-properties' in svc_configs and
                    'ranger-hdfs-plugin-enabled' in
                    svc_configs['ranger-hdfs-plugin-properties']['properties'])
    enabled_flag = ''
    if plugin_known:
        # Prefer the freshly recommended value over the persisted one.
        if ('ranger-hdfs-plugin-properties' in configurations and
                'ranger-hdfs-plugin-enabled' in
                configurations['ranger-hdfs-plugin-properties']['properties']):
            enabled_flag = configurations['ranger-hdfs-plugin-properties'][
                'properties']['ranger-hdfs-plugin-enabled']
        else:
            enabled_flag = svc_configs['ranger-hdfs-plugin-properties'][
                'properties']['ranger-hdfs-plugin-enabled']
    if plugin_known and enabled_flag and enabled_flag.lower() == 'yes':
        put_property(
            provider_key,
            'org.apache.ranger.authorization.hadoop.RangerHdfsAuthorizer')
    else:
        put_attribute(provider_key, 'delete', 'true')
def recommendConfigurationsFromHDP26(self, configurations, clusterData,
                                     services, hosts):
    """Recommend HDFS configurations introduced in HDP 2.6.

    When the Ranger HDFS plugin is enabled and the repository config
    user is present, aligns REPOSITORY_CONFIG_USERNAME with the
    cluster's hdfs user (default 'hadoop').
    """
    if ('hadoop-env' in services['configurations'] and
            'hdfs_user' in services['configurations']['hadoop-env']['properties']):
        hdfs_user = services['configurations']['hadoop-env']['properties']['hdfs_user']
    else:
        hdfs_user = 'hadoop'
    plugin_enabled = False
    if ('ranger-hdfs-plugin-properties' in configurations and
            'ranger-hdfs-plugin-enabled' in
            configurations['ranger-hdfs-plugin-properties']['properties']):
        flag = configurations['ranger-hdfs-plugin-properties'][
            'properties']['ranger-hdfs-plugin-enabled']
        plugin_enabled = flag.lower() == 'yes'
    elif ('ranger-hdfs-plugin-properties' in services['configurations'] and
          'ranger-hdfs-plugin-enabled' in
          services['configurations']['ranger-hdfs-plugin-properties']['properties']):
        flag = services['configurations']['ranger-hdfs-plugin-properties'][
            'properties']['ranger-hdfs-plugin-enabled']
        plugin_enabled = flag.lower() == 'yes'
    repo_user_known = (
        'ranger-hdfs-plugin-properties' in services['configurations'] and
        'REPOSITORY_CONFIG_USERNAME' in
        services['configurations']['ranger-hdfs-plugin-properties']['properties'])
    if plugin_enabled and repo_user_known:
        self.logger.info("Setting HDFS Repo user for Ranger.")
        put_plugin_property = self.putProperty(
            configurations, "ranger-hdfs-plugin-properties", services)
        put_plugin_property("REPOSITORY_CONFIG_USERNAME", hdfs_user)
    else:
        self.logger.info("Not setting HDFS Repo user for Ranger.")
def recommendHDFSConfigurationsObserverNamenode(self, configurations, clusterData,
                                                services, hosts):
    """Recommend the Observer-read proxy provider for HDFS nameservices.

    For every nameservice listed in dfs.internal.nameservices, forces
    dfs.client.failover.proxy.provider.<ns> to
    ObserverReadProxyProvider so clients can read from Observer
    NameNodes.
    """
    hdfsSiteProperties = self.getServicesSiteProperties(
        services, "hdfs-site")
    putHdfsSiteProperty = self.putProperty(configurations, "hdfs-site",
                                           services)
    if 'forced-configurations' not in services:
        services["forced-configurations"] = []
    if hdfsSiteProperties is not None and 'dfs.internal.nameservices' in hdfsSiteProperties:
        name_services = hdfsSiteProperties['dfs.internal.nameservices'].split(',')
        for name_service in name_services:
            name = 'dfs.client.failover.proxy.provider.{0}'.format(name_service)
            # (An unused ipfailover virtual-address key was also
            # computed here in the original; it has been removed.)
            services["forced-configurations"].append({
                "type": "hdfs-site",
                "name": name
            })
            putHdfsSiteProperty(name, 'org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider')
def recommendHDFSConfigurationsLDAP(self, configurations, clusterData,
                                    services, hosts):
    """Point Hadoop group mapping at an OpenLDAP master, when present.

    Configures core-site's LdapGroupsMapping with the bind user,
    password, base DN and an ldap:// URL listing every
    OPENLDAP_MASTER host (space-separated, sorted by host name).
    Does nothing when no OpenLDAP master is deployed or when the
    openldap-config bind settings are unavailable.
    """
    putCoreSiteProperty = self.putProperty(configurations, "core-site",
                                           services)
    ldap_hosts = self.getComponentHostNames(services, "OPENLDAP", "OPENLDAP_MASTER")
    if ldap_hosts is None or len(ldap_hosts) == 0:
        return
    ldap_hosts.sort()
    if 'openldap-config' not in configurations or 'binddn' not in \
            configurations['openldap-config']['properties']:
        # Bug fix: the original fell through to the putCoreSiteProperty
        # calls with the bind credentials unbound, raising NameError.
        return
    ldap_props = configurations['openldap-config']['properties']
    ldap_user = ldap_props['binddn']
    ldap_password = ldap_props['ldap.password']
    ldap_basedn = ldap_props['ldap.domain']
    ldap_url = 'ldap://' + ' ldap://'.join(ldap_hosts)
    putCoreSiteProperty("hadoop.security.group.mapping", 'org.apache.hadoop.security.LdapGroupsMapping')
    putCoreSiteProperty("hadoop.security.group.mapping.ldap.bind.user", ldap_user)
    putCoreSiteProperty("hadoop.security.group.mapping.ldap.bind.password", ldap_password)
    putCoreSiteProperty("hadoop.security.group.mapping.ldap.url", ldap_url)
    putCoreSiteProperty("hadoop.security.group.mapping.ldap.base", ldap_basedn)
def recommendHDFSConfigurationsTune(self, configurations, clusterData,
                                    services, hosts):
    """Tune per-NameNode RPC endpoints for every HA nameservice.

    Derives each NameNode's host from its rpc-address and forces a
    dedicated service RPC port (8040) and lifeline RPC port (8050)
    for it, recording both as forced configurations.
    """
    site_props = self.getServicesSiteProperties(services, "hdfs-site")
    put_hdfs_property = self.putProperty(configurations, "hdfs-site",
                                         services)
    forced = services.setdefault("forced-configurations", [])
    if site_props is None or 'dfs.internal.nameservices' not in site_props:
        return
    for ns in site_props['dfs.internal.nameservices'].split(','):
        nn_ids = site_props['dfs.ha.namenodes.' + ns].split(',')
        for nn_id in nn_ids:
            rpc_value = str(site_props['dfs.namenode.rpc-address.{0}.{1}'.format(ns, nn_id)])
            rpc_host = rpc_value.split(":")[0]
            for key_template, port in (
                    ('dfs.namenode.servicerpc-address.{0}.{1}', ':8040'),
                    ('dfs.namenode.lifeline.rpc-address.{0}.{1}', ':8050')):
                key = key_template.format(ns, nn_id)
                forced.append({
                    "type": "hdfs-site",
                    "name": key
                })
                put_hdfs_property(key, rpc_host + port)
def recommendHDFSConfigurationsFromHDP31(self, configurations, clusterData,
                                         services, hosts):
    """Recommend HDFS configurations introduced in HDP 3.1.

    Enables subAccess for ContentSummary permission checks exactly
    when the Ranger HDFS plugin is enabled, and disables it otherwise.
    """
    put_hdfs_property = self.putProperty(configurations, "hdfs-site",
                                         services)
    flag = None
    # Recommended changes take precedence over persisted settings.
    if ('ranger-hdfs-plugin-properties' in configurations and
            'ranger-hdfs-plugin-enabled' in
            configurations['ranger-hdfs-plugin-properties']['properties']):
        flag = configurations['ranger-hdfs-plugin-properties'][
            'properties']['ranger-hdfs-plugin-enabled']
    elif ('ranger-hdfs-plugin-properties' in services['configurations'] and
          'ranger-hdfs-plugin-enabled' in
          services['configurations']['ranger-hdfs-plugin-properties']['properties']):
        flag = services['configurations']['ranger-hdfs-plugin-properties'][
            'properties']['ranger-hdfs-plugin-enabled']
    plugin_enabled = flag is not None and flag.lower() == 'yes'
    put_hdfs_property('dfs.permissions.ContentSummary.subAccess',
                      'true' if plugin_enabled else 'false')
def recommendConfigurationsForSSO(self, configurations, clusterData,
                                  services, hosts):
    """Recommend hdfs-site HTTP authentication settings for SSO (Knox).

    Only acts when Ambari SSO details are available and Ambari is managing
    SSO for services; otherwise the method is a no-op.
    """
    ambari_configuration = self.get_ambari_configuration(services)
    sso_details = ambari_configuration.get_ambari_sso_details() if ambari_configuration else None
    if not (sso_details and sso_details.is_managing_services()):
        return
    put_hdfs_site = self.putProperty(configurations, "hdfs-site", services)
    kerberos_on = self.is_kerberos_enabled(configurations, services)
    if sso_details.should_enable_sso('HDFS'):
        if kerberos_on:
            put_hdfs_site(
                'hadoop.http.authentication.type',
                "org.apache.hadoop.security.authentication.server.JWTRedirectAuthenticationHandler"
            )
            put_hdfs_site(
                'hadoop.http.authentication.authentication.provider.url',
                sso_details.get_sso_provider_url())
            put_hdfs_site(
                'hadoop.http.authentication.public.key.pem',
                sso_details.get_sso_provider_certificate(False, True))
        else:
            # SSO requires Kerberos; fall back to simple authentication.
            self.logger.warn(
                "Enabling SSO integration for HDFS requires Kerberos, Since Kerberos is not enabled, SSO integration is not being recommended."
            )
            put_hdfs_site('hadoop.http.authentication.type', "simple")
    elif sso_details.should_disable_sso('HDFS'):
        # SSO is being turned off: revert to the authentication scheme
        # implied by the cluster's Kerberos state.
        put_hdfs_site('hadoop.http.authentication.type',
                      "kerberos" if kerberos_on else "simple")
def is_kerberos_enabled(self, configurations, services):
    """
    Tests if HDFS has Kerberos enabled by first checking the recommended changes and then the
    existing settings.
    :type configurations dict
    :type services dict
    :rtype bool
    """
    check = self._is_kerberos_enabled
    return check(configurations) or (
        services and 'configurations' in services
        and check(services['configurations']))
def _is_kerberos_enabled(self, config):
"""
Detects if HDFS has Kerberos enabled given a dictionary of configurations.
:type config dict
:rtype bool
"""
return config and \
(
(
"hdfs-site" in config and
'hadoop.security.authentication' in config['hdfs-site']["properties"] and
config['hdfs-site']["properties"]['hadoop.security.authentication'] == 'kerberos'
) or (
"core-site" in config and
'hadoop.security.authentication' in config['core-site']["properties"] and
config['core-site']["properties"]['hadoop.security.authentication'] == 'kerberos'
)
)
class HDFSValidator(service_advisor.ServiceAdvisor):
    """
    HDFS Validator checks the correctness of properties whenever the service is first added or the user attempts to
    change configs via the UI.
    """

    def __init__(self, *args, **kwargs):
        # Cache the bound super() proxy so other methods can delegate to the
        # base class without repeating the super(...) incantation.
        self.as_super = super(HDFSValidator, self)
        self.as_super.__init__(*args, **kwargs)

        # Registry of (config-type, validator-method) pairs consulted by the
        # advisor framework. The same config type may appear several times,
        # each entry contributing an independent set of checks.
        self.validators = [
            ("hdfs-site", self.validateHDFSConfigurationsFromHDP206),
            ("hadoop-env", self.validateHadoopEnvConfigurationsFromHDP206),
            ("core-site", self.validateHDFSCoreSiteFromHDP206),
            ("hdfs-site", self.validateHDFSConfigurationsFromHDP22),
            ("hadoop-env", self.validateHadoopEnvConfigurationsFromHDP22),
            ("ranger-hdfs-plugin-properties",
             self.validateHDFSRangerPluginConfigurationsFromHDP22),
            ("hdfs-site", self.validateRangerAuthorizerFromHDP23)
        ]

    # **********************************************************
    # Example of how to add a function that validates a certain config type.
    # If the same config type has multiple functions, can keep adding tuples to self.validators
    # self.validators.append(("hadoop-env", self.sampleValidator))
def sampleValidator(self, properties, recommendedDefaults, configurations,
                    services, hosts):
    """
    Example of a validator function for other Service Advisors to emulate.
    :return: A list of configuration validation problems.
    """
    # An item is a simple dictionary: {"level": "ERROR|WARN", "message": "value"}.
    # Build one with self.getErrorItem(message) or self.getWarnItem(message)
    # depending on the desired log level.
    problem = {
        "config-name": "my_config_property_name",
        "item": self.getErrorItem(
            "My custom message in method %s" % inspect.stack()[0][3]),
    }
    return self.toConfigurationValidationProblems([problem], "hadoop-env")
def validateHDFSConfigurationsFromHDP206(self, properties,
                                         recommendedDefaults,
                                         configurations, services, hosts):
    """
    This was copied from HDP 2.0.6; validate hdfs-site
    :return: A list of configuration validation problems.
    """
    # Placeholder validator: cluster-env is looked up for parity with the
    # original HDP 2.0.6 logic, but no checks are currently performed.
    clusterEnv = self.getSiteProperties(configurations, "cluster-env")
    problems = []
    return self.toConfigurationValidationProblems(problems, "hdfs-site")
def validatorOneDataDirPerPartition(self, properties, propertyName,
                                    services, hosts, clusterEnv):
    """Warn when cluster-env/one_dir_per_partition is enabled but a DataNode
    keeps more than one data directory on the same mount.

    :return: an error/warn item dict, or None when the check passes.
    """
    if propertyName not in properties:
        return self.getErrorItem("Value should be set")
    dirs = properties[propertyName]
    one_dir_per_partition = (clusterEnv and "one_dir_per_partition" in clusterEnv
                             and clusterEnv["one_dir_per_partition"].lower() == "true")
    if not one_dir_per_partition:
        # feature disabled; nothing to validate
        return None
    affected_hosts = set()
    for host in self.getDataNodeHosts(services, hosts):
        mount_points = [disk_info["mountpoint"]
                        for disk_info in host["Hosts"]["disk_info"]]
        if get_mounts_with_multiple_data_dirs(mount_points, dirs):
            # A detailed per-mount message can be too long on large clusters,
            # so only the host name is recorded; one affected host is enough.
            affected_hosts.add(host["Hosts"]["host_name"])
            break
    if affected_hosts:
        return self.getWarnItem(
            "cluster-env/one_dir_per_partition is enabled but there are multiple data directories on the same mount. Affected hosts: {0}"
            .format(", ".join(sorted(affected_hosts))))
    return None
def validateHadoopEnvConfigurationsFromHDP206(
        self, properties, recommendedDefaults, configurations, services,
        hosts):
    """
    This was copied from HDP 2.0.6; validate hadoop-env
    :return: A list of configuration validation problems.
    """
    # Both NameNode new-generation heap settings are checked against the
    # recommended defaults with the same validator.
    checked_properties = ('namenode_opt_newsize', 'namenode_opt_maxnewsize')
    validation_items = [{
        "config-name": prop_name,
        "item": self.validatorLessThenDefaultValue(
            properties, recommendedDefaults, prop_name),
    } for prop_name in checked_properties]
    return self.toConfigurationValidationProblems(validation_items,
                                                  "hadoop-env")
def validateHDFSCoreSiteFromHDP206(self, properties, recommendedDefaults,
                                   configurations, services, hosts):
    """
    This was copied from HDP 2.0.6; validate core-site
    :return: A list of configuration validation problems.
    """
    # Combine the generic hadoop proxy-user checks with the Ambari-user
    # specific ones.
    problems = list(self.getHadoopProxyUsersValidationItems(
        properties, services, hosts, configurations))
    problems += self.getAmbariProxyUsersForHDFSValidationItems(
        properties, services)
    return self.toConfigurationValidationProblems(problems, "core-site")
def getAmbariProxyUsersForHDFSValidationItems(self, properties, services):
validationItems = []
servicesList = self.get_services_list(services)
if "HDFS" in servicesList:
ambari_user = self.getAmbariUser(services)
props = ("hadoop.proxyuser.{0}.hosts".format(ambari_user),
"hadoop.proxyuser.{0}.groups".format(ambari_user))
for prop in | |
"""ndb model definitions
Many of these are similar to models in models.py, which are Django models. We
need these ndb versions for use with runtime: python27, which is required by
endpoints.
"""
import collections
import logging
import math
import os
import webapp2
from google.appengine.api import search
from google.appengine.ext import ndb, blobstore
import general_utils
# TODO: move to global config
SALES_TAX_RATE = float(os.environ.get('SALES_TAX_RATE', 0.0925))
def _SortItemsWithSections(items):
"""Sort a list of items so they look OK in the UI."""
items.sort(
key=lambda x: (x.order_form_section or None, x.name))
prev_section = None
for i in items:
new_section = i.order_form_section or None
if prev_section != new_section:
i.first_in_section = True
prev_section = new_section
class _ActiveItems(object):
    """Similar to backreference "*_set" properties in the old db interface."""

    def __init__(self, ref, kind_cls):
        """
        Args:
          ref: instance of a model that is referenced by another kind of model
          kind_cls: ndb kind to be selected, like in Key(kind=kind_cls)
        """
        # Select records of kind_cls that point at ref, excluding placeholder
        # ('new') and deleted states.
        self._query = kind_cls.query(
            kind_cls.site == ref.key,
            kind_cls.state != 'new',
            kind_cls.state != 'deleted',
            kind_cls.state != 'Deleted')

    def Count(self):
        """Number of matching records."""
        return self._query.count()

    def Items(self):
        """Yield the matching records, most recently modified first."""
        ordered = sorted(self._query, key=lambda record: record.modified,
                         reverse=True)
        for record in ordered:
            yield record

    def __iter__(self):
        return self.Items()
class SearchableModel(ndb.Model):
    # Base ndb model that mirrors itself into a Search API full-text index:
    # each put() re-indexes the entity in an index named after the concrete
    # class, and each delete removes the corresponding search documents.
    # NOTE: written for the Python 2 runtime (uses `unicode` and py2 `map`).

    def get_search_result_headline(self):
        # Default headline shown for a search hit; subclasses may override.
        return "{} id={}".format(type(self), self.key.integer_id())

    def get_search_result_detail_lines(self):
        # One "prop: value" line per populated property; subclasses may override.
        return ["{}: {}".format(prop, getattr(self, prop)) for prop in self._properties if hasattr(self, prop)]

    @staticmethod
    def get_search_order():
        """override with lower number to search this index first"""
        return 1e10

    def get_canonical_request_response(self, request):
        """override to build a default response to requests whose search resolve to this model"""
        raise NotImplementedError("{} has no canonical request response defined".format(self.__class__.__name__))

    def get_indexed_fields(self):
        # Map each populated ndb property onto a Search API field of the
        # matching type; properties of unsupported types are skipped with a
        # warning.
        fields = []
        for prop_name, prop in self._properties.items():
            if not hasattr(self, prop_name):
                continue
            value = getattr(self, prop_name)
            if value is None:
                continue
            prop_type = type(prop)
            # identity unless the property type needs conversion to text
            value_processor = lambda v: v
            if prop_type in (ndb.TextProperty, ndb.StringProperty):
                search_type = search.TextField
            elif prop_type in (ndb.FloatProperty, ndb.IntegerProperty):
                search_type = search.NumberField
            elif prop_type in (ndb.DateProperty, ndb.DateTimeProperty):
                search_type = search.DateField
            elif prop_type == ndb.UserProperty:
                search_type = search.TextField
                value_processor = lambda v: v.email()
            elif prop_type == ndb.KeyProperty:
                search_type = search.TextField
                value_processor = lambda v: unicode(v.id())
            elif prop_type == ndb.BooleanProperty:
                search_type = search.AtomField
                value_processor = lambda v: unicode(v)
            else:
                logging.warning("type {} not supported {}".format(prop_type, SearchableModel.__name__))
                continue
            if prop._repeated:
                # repeated properties become one search field per element
                for s in value:
                    fields.append(search_type(name=prop_name, value=value_processor(s)))
            else:
                try:
                    fields.append(search_type(name=prop_name, value=value_processor(value)))
                except TypeError:
                    # re-raised unchanged; kept as a breakpoint-friendly hook
                    raise
        return fields

    def _post_put_hook(self, future):
        # Re-index after every successful put.
        put_result = future.get_result()  # blocks on put but not a bad idea anyway
        model_key_id = put_result.integer_id()
        self.index(model_key_id)

    def index(self, model_key_id):
        # (Re)build the search document for this entity in the class-named
        # index, replacing any stale documents first.
        index_name = self.__class__.__name__
        index = search.Index(index_name)
        self.delete_by_model_key_id(model_key_id)
        fields = [
            search.AtomField(name="model_name", value=index_name),
            search.AtomField(name="model_key_id", value=unicode(model_key_id)),
            search.TextField(name='headline', value=self.get_search_result_headline())
        ]
        for detail in self.get_search_result_detail_lines():
            fields.append(search.TextField(name='details', value=detail))
        fields.extend(self.get_indexed_fields())
        doc = search.Document(doc_id=unicode(self.key.integer_id()), fields=fields)
        index.put(doc)

    @classmethod
    def delete_by_model_key_id(cls, model_key_id):
        # Remove every search document recorded for the given entity id.
        index_name = cls.__name__
        index = search.Index(index_name)
        index.delete(document_ids=map(lambda d: d.doc_id, index.search("model_key_id={}".format(model_key_id))))

    @classmethod
    def _post_delete_hook(cls, key, future):
        # Keep the search index in sync when entities are deleted.
        cls.delete_by_model_key_id(key.id())
class Jurisdiction(SearchableModel):
    """A jurisdiction name for reporting purposes."""
    # display name of the jurisdiction
    name = ndb.StringProperty()

    def __unicode__(self):
        return self.name

    def __str__(self):
        return self.name
class ProgramType(SearchableModel):
    """
    year-independent representation of a program
    names are like NRD, Teambuild and Safe
    there should only be a handful of these and
    they should be relatively static
    """
    # short program code, e.g. "NRD"
    name = ndb.StringProperty()

    @staticmethod
    def get_or_create(name):
        """
        returns a tuple of the (possibly new) instance and a boolean indicating whether
        it was created
        WARNING: This method puts the new model if it does not yet exist
        :param name: name of the program type
        :type name: str
        :return: tuple of instance and boolean (true if created, false otherwise)
        :rtype: tuple[ProgramType, bool]
        """
        created = False
        # py2: accept both byte and unicode strings
        assert isinstance(name, str) or isinstance(name, unicode)
        result = ProgramType.query().filter(ProgramType.name == name).get()
        if result is None:
            created = True
            result = ProgramType(name=name)
            # use the name itself as the entity key so lookups are stable
            result.key = ndb.Key(ProgramType, name)
            result.put()
        return result, created
class Program(SearchableModel):
    """Identifies a program type like "National Rebuilding Day" and its year.

    Programs with status 'Active' will be visible to Captains.

    The name property is shorthand for the year and program type like "2012 NRD".
    """
    ACTIVE_STATUS = "Active"
    INACTIVE_STATUS = "Inactive"
    STATUSES = (ACTIVE_STATUS, INACTIVE_STATUS)

    # reference to the year-independent ProgramType
    program_type = ndb.KeyProperty(ProgramType)
    year = ndb.IntegerProperty(choices=range(1987, 2500))
    status = ndb.StringProperty(choices=STATUSES, default=STATUSES[0])
    # derived display name ("<year> <type>"); recomputed on every put()
    name = ndb.StringProperty()

    def get_sort_key(self):
        # newest year first, then by program type key
        return -self.year, self.program_type

    def put(self, *a, **k):
        # Recompute the shorthand name and default the status before saving.
        program_type_name = self.program_type.get().name
        self.name = "{} {}".format(self.year, program_type_name)
        self.status = self.status or Program.ACTIVE_STATUS
        return super(Program, self).put(*a, **k)

    @staticmethod
    def from_fully_qualified_name(fully_qualified_name):
        # Look up by the "<year> <type>" shorthand, e.g. "2012 NRD".
        query = Program.query()
        query = query.filter(Program.name == fully_qualified_name)
        return query.get()

    @staticmethod
    def get_or_create(program_type_key, year, status=None):
        """
        returns a tuple of the (possibly new) instance and a boolean indicating whether
        it was created
        WARNING: This method puts the new model if it does not yet exist
        :param program_type_key: program type
        :type program_type_key: ndb.Key
        :param year: year
        :type year: int
        :param status: status
        :type status: str
        :return: tuple of instance and boolean (true if created, false otherwise)
        :rtype: tuple[Program, bool]
        """
        # py2: accept both int and long years
        assert isinstance(year, int) or isinstance(year, long)
        assert status is None or status in Program.STATUSES
        created = False
        query = Program.query()
        query = query.filter(Program.program_type == program_type_key)
        query = query.filter(Program.year == year)
        result = query.get()
        if result is None:
            created = True
            result = Program(program_type=program_type_key, year=year, status=status)
            result.put()
        elif status is not None:
            # an existing program must already carry the requested status
            assert result.status == status
        return result, created
class Staff(SearchableModel):
    """Minimal variant of the Staff model.

    For use in authorization within endpoints.
    """
    name = ndb.StringProperty()
    # identity used for authorization; required
    email = ndb.StringProperty(required=True)
    # last program chosen in the UI, stored both by display name and by key
    program_selected = ndb.StringProperty()
    program_selected_key = ndb.KeyProperty(kind=Program)
    # refreshed on every put (auto_now)
    last_welcome = ndb.DateProperty(auto_now=True)
    notes = ndb.TextProperty()
    # set once when the record is first created
    since = ndb.DateProperty(auto_now_add=True)
class Captain(SearchableModel):
    """A work captain."""
    name = ndb.StringProperty(required=True)  # "<NAME>"
    # Using the UserProperty seems to be more hassle than it's worth.
    # I was getting errors about users that didn't exist when loading sample
    # data.
    email = ndb.StringProperty()  # "<EMAIL>"
    rooms_id = ndb.StringProperty()  # "R00011"
    phone_mobile = ndb.StringProperty()
    phone_work = ndb.StringProperty()
    phone_home = ndb.StringProperty()
    phone_fax = ndb.StringProperty()
    phone_other = ndb.StringProperty()
    tshirt_size = ndb.StringProperty(choices=(
        'Small',
        'Medium',
        'Large',
        'X-Large',
        '2XL',
        '3XL'))
    notes = ndb.TextProperty()
    last_welcome = ndb.DateTimeProperty()
    modified = ndb.DateTimeProperty(auto_now=True)
    last_editor = ndb.UserProperty(auto_current_user=True)
    # lowercased name/email prefixes rebuilt on every put();
    # presumably used for search-as-you-type lookups -- TODO confirm callers
    search_prefixes = ndb.StringProperty(repeated=True)

    def put(self, *a, **k):
        # Rebuild the prefix index: whole name/email, each name word, and
        # prefixes of length 1-6 of each word and of the email.
        prefixes = set()
        if self.name:
            prefixes.add(self.name)
            for part in self.name.split():
                prefixes.add(part)
                for i in xrange(1, 7):
                    prefixes.add(part[:i])
        if self.email:
            prefixes.add(self.email)
            for i in xrange(1, 7):
                prefixes.add(self.email[:i])
        self.search_prefixes = [p.lower() for p in prefixes]
        return super(Captain, self).put(*a, **k)

    def __unicode__(self):
        return self.name

    def Label(self):
        # e.g. 'Jane Doe <jane@example.com>'
        return "%s <%s>" % (self.name, self.email)
class Supplier(SearchableModel):
    """A supplier of Items."""
    name = ndb.StringProperty(required=True)
    email = ndb.StringProperty()
    address = ndb.StringProperty()
    phone1 = ndb.StringProperty()
    phone2 = ndb.StringProperty()
    notes = ndb.TextProperty()
    # set once when the record is first created
    since = ndb.DateProperty(auto_now_add=True)
    active = ndb.StringProperty(choices=('Active', 'Inactive'),
                                default='Active')
    # controls who may see this supplier in the UI
    visibility = ndb.StringProperty(choices=('Everyone', 'Staff Only'),
                                    default='Everyone')

    def __unicode__(self):
        return self.name

    def __str__(self):
        return self.name
class OrderSheet(SearchableModel):
    """Set of items commonly ordered together.

    Corresponds to one of the old paper forms, like the Cleaning Supplies form.
    """
    name = ndb.StringProperty()
    visibility = ndb.StringProperty(choices=('Everyone', 'Staff Only', 'Inactive'),
                                    default='Everyone')
    supports_extra_name_on_order = ndb.BooleanProperty(default=False)
    supports_internal_invoice = ndb.BooleanProperty(default=False)
    # short code for the sheet
    code = ndb.StringProperty()
    instructions = ndb.TextProperty(default='')
    logistics_instructions = ndb.TextProperty(default='')
    default_supplier = ndb.KeyProperty(kind=Supplier)
    # Choose one of the next three.
    delivery_options = ndb.StringProperty(choices=['Yes', 'No'], default='No')
    pickup_options = ndb.StringProperty(choices=['Yes', 'No'], default='No')
    borrow_options = ndb.StringProperty(choices=['Yes', 'No'], default='No')
    retrieval_options = ndb.StringProperty(choices=['Yes', 'No'], default='No')

    def __unicode__(self):
        return '%s' % (self.name)

    def HasLogistics(self):
        # True when any of the delivery/pickup/borrow/retrieval flows applies.
        return (self.delivery_options == 'Yes' or
                self.pickup_options == 'Yes' or
                self.borrow_options == 'Yes' or
                self.retrieval_options == 'Yes')

    @property
    def item_set(self):
        # all Items that appear on this order form (backreference-style query)
        return Item.query(Item.appears_on_order_form == self.key)
class Item(SearchableModel):
    """Represents a type of thing that may be in the inventory or possible to order."""
    bar_code_number = ndb.IntegerProperty()
    # bar_code_number.unique = True
    name = ndb.StringProperty(required=True)
    # name.unique = True
    appears_on_order_form = ndb.KeyProperty(kind=OrderSheet)
    # section heading on the order form; may carry a numeric sort prefix
    order_form_section = ndb.StringProperty()
    description = ndb.StringProperty()
    # 'Each' 'Box' 'Pair' etc
    # NOTE(review): 'Box' appears twice in the choices tuple (harmless but
    # redundant) -- confirm before cleaning up
    measure = ndb.StringProperty(
        choices=('Each', 'Roll', 'Bottle', 'Box', 'Pair', 'Board', 'Bundle',
                 'Bag', 'Ton', 'Yard', 'Sheet', 'Cartridge', 'Tube', 'Tub',
                 'Sq. Yds.', 'Gallon', 'Section', 'Home', 'Box', 'Drop-off',
                 '', 'Other'))
    # Dollars.
    unit_cost = ndb.FloatProperty()
    must_be_returned = ndb.StringProperty(choices=['Yes', 'No'], default='No')
    picture = ndb.BlobProperty()
    thumbnail = ndb.BlobProperty()
    supplier = ndb.KeyProperty(kind=Supplier)
    supplier_part_number = ndb.StringProperty()
    url = ndb.StringProperty()
    last_editor = ndb.UserProperty()
    created = ndb.DateTimeProperty(auto_now_add=True)
    modified = ndb.DateTimeProperty(auto_now=True)
    supports_extra_name_on_order = ndb.BooleanProperty(default=False)

    def __unicode__(self):
        return self.description

    def VisibleSortableLabel(self, label):
        """Strips numeric prefixes used for sorting.

        Labels may have a digit prefix which is used for sorting, but
        should not be shown to users.
        """
        if not label:
            return ''
        parts = label.split()
        if len(parts) > 0 and parts[0].isdigit():
            return ' '.join(parts[1:])
        return label

    def VisibleName(self):
        return self.VisibleSortableLabel(self.name)

    def VisibleOrderFormSection(self):
        return self.VisibleSortableLabel(self.order_form_section)

    def SupportsName(self):
        # an extra name is allowed if either the item or its order sheet
        # supports it
        return (self.supports_extra_name_on_order
                or self.appears_on_order_form.get().supports_extra_name_on_order)
class UploadedDocument(ndb.Model):
filename | |
= cal_date.tm_mon
#-- convert date to year decimal
self.time[t], = gravity_toolkit.time.convert_calendar_decimal(
cal_date.tm_year, cal_date.tm_mon)
#-- estimated GRACE/GRACE-FO month
#-- Accelerometer shutoffs complicate the month number calculation
self.month[t] = gravity_toolkit.time.calendar_to_grace(year,month)
#-- will only advance in time after reading the
#-- order 1 coefficients (t+0=t)
t += m
#-- The 'Special Months' (Nov 2011, Dec 2011 and April 2012) with
#-- Accelerometer shutoffs make the relation between month number
#-- and date more complicated as days from other months are used
self.month = gravity_toolkit.time.adjust_months(self.month)
#-- return the geocenter harmonics
return self
def from_netCDF4(self, geocenter_file, **kwargs):
    """
    Reads geocenter file and extracts dates and spherical harmonic data
    from a netCDF4 file

    Arguments
    ---------
    geocenter_file: degree 1 netCDF4 file

    Keyword arguments
    -----------------
    compression: netCDF4 file is compressed or streaming as bytes

    References
    ----------
    <NAME>, and <NAME>, "Improved estimates of geocenter
    variability from time-variable gravity and ocean model outputs,
    Remote Sensing, 11(18), 2108, (2019).
    doi:10.3390/rs11182108
    """
    kwargs.setdefault('compression',None)
    #-- set filename (resolved by the shared case-insensitive helper)
    self.case_insensitive_filename(geocenter_file)
    #-- Open the netCDF4 file for reading
    if (kwargs['compression'] == 'gzip'):
        #-- read gzipped file as in-memory (diskless) netCDF4 dataset
        with gzip.open(self.filename,'r') as f:
            fileID = netCDF4.Dataset(uuid.uuid4().hex,
                memory=f.read())
    elif (kwargs['compression'] == 'bytes'):
        #-- read as in-memory (diskless) netCDF4 dataset
        #-- (here self.filename is assumed to be a file-like object -- TODO confirm)
        fileID = netCDF4.Dataset(uuid.uuid4().hex,
            memory=self.filename.read())
    else:
        #-- read directly from disk
        fileID = netCDF4.Dataset(self.filename, 'r')
    #-- Getting the data from each netCDF4 variable
    DEG1 = {}
    #-- converting netCDF4 objects into numpy arrays
    for key,val in fileID.variables.items():
        DEG1[key] = val[:].copy()
    #-- close the netCDF4 file
    fileID.close()
    #-- return the geocenter harmonics
    return self.from_dict(DEG1)
def copy(self, **kwargs):
    """
    Copy a geocenter object to a new geocenter object

    Keyword arguments
    -----------------
    fields: default keys in geocenter object
    """
    #-- attributes duplicated by default
    fields = kwargs.get('fields', ['time', 'month',
        'C10', 'C11', 'S11', 'eC10', 'eC11', 'eS11',
        'X', 'Y', 'Z'])
    duplicate = geocenter()
    #-- copy each present attribute; silently skip any that are unset
    for key in fields:
        try:
            setattr(duplicate, key, np.copy(getattr(self, key)))
        except AttributeError:
            pass
    return duplicate
def from_dict(self, temp, **kwargs):
    """
    Convert a dictionary object to a geocenter object

    Arguments
    ---------
    temp: dictionary object to be converted

    Keyword arguments
    -----------------
    fields: default keys in dictionary
    """
    #-- keys transferred by default
    fields = kwargs.get('fields', ['time', 'month',
        'C10', 'C11', 'S11', 'eC10', 'eC11', 'eS11',
        'X', 'Y', 'Z', 'X_sigma', 'Y_sigma', 'Z_sigma'])
    #-- copy each available entry; skip missing keys and uncopyable values
    for key in fields:
        try:
            setattr(self, key, temp[key].copy())
        except (AttributeError, KeyError):
            pass
    return self
def from_harmonics(self, temp, **kwargs):
    """
    Convert a harmonics object to a geocenter object

    Arguments
    ---------
    temp: harmonics object to be converted

    Keyword arguments
    -----------------
    fields: default keys in harmonics object
    """
    #-- refresh shape and ndim attributes on the input object
    temp.update_dimensions()
    #-- metadata attributes transferred by default
    fields = kwargs.get('fields', ['time', 'month', 'filename'])
    for key in fields:
        try:
            setattr(self, key, np.copy(getattr(temp, key)))
        except AttributeError:
            pass
    #-- extract the degree-1 coefficients (single epoch or time series)
    if temp.ndim == 2:
        self.C10 = np.copy(temp.clm[1, 0])
        self.C11 = np.copy(temp.clm[1, 1])
        self.S11 = np.copy(temp.slm[1, 1])
    elif temp.ndim == 3:
        self.C10 = np.copy(temp.clm[1, 0, :])
        self.C11 = np.copy(temp.clm[1, 1, :])
        self.S11 = np.copy(temp.slm[1, 1, :])
    #-- return the geocenter object
    return self
def from_matrix(self, clm, slm):
    """
    Converts spherical harmonic matrices to a geocenter object

    Arguments
    ---------
    clm: cosine spherical harmonics of degree 1
    slm: sine spherical harmonics of degree 1
    """
    #-- promote to 3 dimensions so a single epoch becomes a length-1 series
    clm = np.atleast_3d(clm)
    slm = np.atleast_3d(slm)
    #-- pull out the degree-1 coefficient time series
    self.C10 = np.copy(clm[1, 0, :])
    self.C11 = np.copy(clm[1, 1, :])
    self.S11 = np.copy(slm[1, 1, :])
    return self
def to_dict(self, **kwargs):
    """
    Convert a geocenter object to a dictionary object

    Keyword arguments
    -----------------
    fields: default attributes in geocenter object
    """
    #-- attributes exported by default
    fields = kwargs.get('fields', ['time', 'month',
        'C10', 'C11', 'S11', 'eC10', 'eC11', 'eS11',
        'X', 'Y', 'Z', 'X_sigma', 'Y_sigma', 'Z_sigma'])
    #-- shallow-copy each attribute that is actually set on this object
    return {key: copy.copy(getattr(self, key))
            for key in fields if hasattr(self, key)}
def to_matrix(self):
    """
    Converts a geocenter object to spherical harmonic matrices
    """
    #-- number of time steps in the coefficient series
    nt = np.shape(np.atleast_2d(self.C10))[1]
    #-- degree-1 harmonic matrices (all other entries zero)
    clm = np.zeros((2, 2, nt))
    slm = np.zeros((2, 2, nt))
    clm[1, 0, :] = np.atleast_2d(self.C10)
    clm[1, 1, :] = np.atleast_2d(self.C11)
    slm[1, 1, :] = np.atleast_2d(self.S11)
    return dict(clm=clm, slm=slm)
def to_cartesian(self, kl=0.0):
    """
    Converts normalized spherical harmonics to cartesian geocenter variations

    Keyword arguments
    -----------------
    kl: gravitational load love number of degree 1
    """
    #-- conversion factor from Stokes coefficients to cartesian offsets;
    #-- if no radius is defined there is nothing to convert
    try:
        scale = self.radius*np.sqrt(3.0)/(1.0 + kl)
    except AttributeError:
        return self
    #-- Stokes Coefficients to cartesian geocenter
    try:
        self.Z = self.C10*scale
        self.X = self.C11*scale
        self.Y = self.S11*scale
    except Exception:
        pass
    #-- convert errors to cartesian geocenter (error terms are optional)
    try:
        self.Z_sigma = self.eC10*scale
        self.X_sigma = self.eC11*scale
        self.Y_sigma = self.eS11*scale
    except Exception:
        pass
    return self
def to_cmwe(self, kl=0.0):
    """
    Converts normalized spherical harmonics to centimeters water equivalent

    Keyword arguments
    -----------------
    kl: gravitational load love number of degree 1
    """
    #-- Average Density of the Earth [g/cm^3]
    rho_e = 5.517
    #-- Average Radius of the Earth [cm]
    rad_e = 6.371e8
    factor = (rho_e*rad_e)/(1.0 + kl)
    #-- scale the coefficients in place
    self.C10 *= factor
    self.C11 *= factor
    self.S11 *= factor
    #-- scale the error terms when present (optional attributes)
    try:
        self.eC10 *= factor
        self.eC11 *= factor
        self.eS11 *= factor
    except Exception:
        pass
    return self
def to_mmwe(self, kl=0.0):
    """
    Converts normalized spherical harmonics to millimeters water equivalent

    Keyword arguments
    -----------------
    kl: gravitational load love number of degree 1
    """
    #-- first convert to centimeters water equivalent
    self.to_cmwe(kl=kl)
    #-- cm -> mm, scaling each series in place
    for key in ('C10', 'C11', 'S11'):
        value = getattr(self, key)
        value *= 10.0
        setattr(self, key, value)
    #-- scale the error terms when present (optional attributes)
    try:
        for key in ('eC10', 'eC11', 'eS11'):
            value = getattr(self, key)
            value *= 10.0
            setattr(self, key, value)
    except Exception:
        pass
    return self
def from_cartesian(self, kl=0.0):
    """
    Converts cartesian geocenter variations to normalized spherical harmonics

    Keyword arguments
    -----------------
    kl: gravitational load love number of degree 1
    """
    #-- shared denominator for the cartesian-to-Stokes conversion
    denom = self.radius*np.sqrt(3.0)
    self.C10 = (1.0 + kl)*self.Z/denom
    self.C11 = (1.0 + kl)*self.X/denom
    self.S11 = (1.0 + kl)*self.Y/denom
    #-- convert uncertainties when present (optional attributes)
    try:
        self.eC10 = (1.0 + kl)*self.Z_sigma/denom
        self.eC11 = (1.0 + kl)*self.X_sigma/denom
        self.eS11 = (1.0 + kl)*self.Y_sigma/denom
    except Exception:
        pass
    return self
def from_cmwe(self, kl=0.0):
    """
    Normalizes spherical harmonics from centimeters water equivalent (cmwe)

    Keyword arguments
    -----------------
    kl: gravitational load love number of degree 1
    """
    #-- Average Density of the Earth [g/cm^3]
    rho_e = 5.517
    #-- Average Radius of the Earth [cm]
    rad_e = 6.371e8
    factor = (1.0 + kl)/(rho_e*rad_e)
    #-- scale the coefficients in place
    self.C10 *= factor
    self.C11 *= factor
    self.S11 *= factor
    #-- scale the error terms when present (optional attributes)
    try:
        self.eC10 *= factor
        self.eC11 *= factor
        self.eS11 *= factor
    except Exception:
        pass
    return self
def from_mmwe(self, kl=0.0):
    """
    Normalizes spherical harmonics from millimeters water equivalent (mmwe)

    Keyword arguments
    -----------------
    kl: gravitational load love number of degree 1
    """
    #-- first apply the cmwe normalization
    self.from_cmwe(kl=kl)
    #-- mm -> cm, scaling each series in place
    for key in ('C10', 'C11', 'S11'):
        value = getattr(self, key)
        value /= 10.0
        setattr(self, key, value)
    #-- scale the error terms when present (optional attributes)
    try:
        for key in ('eC10', 'eC11', 'eS11'):
            value = getattr(self, key)
            value /= 10.0
            setattr(self, key, value)
    except Exception:
        pass
    return self
def mean(self, apply=False, indices=Ellipsis):
    """
    Compute mean gravitational field and remove from data if specified

    Keyword arguments
    -----------------
    apply: remove the mean field from the input harmonics
    indices: of input harmonics object to compute mean
    """
    static = geocenter()
    #-- mean of each degree-1 coefficient over the selected indices
    static.C10 = np.mean(self.C10[indices])
    static.C11 = np.mean(self.C11[indices])
    static.S11 = np.mean(self.S11[indices])
    #-- optionally remove the static component from the time series
    if apply:
        self.C10 -= static.C10
        self.C11 -= static.C11
        self.S11 -= static.S11
    #-- mean of the temporal variables, when available
    for key in ['time', 'month']:
        try:
            val = getattr(self, key)
            setattr(static, key, np.mean(val[indices]))
        except:
            continue
    #-- return the mean field
    return static
def add(self, temp):
    """
    Add two geocenter objects

    Arguments
    ---------
    temp: geocenter object to be added
    """
    #-- accumulate each degree-1 coefficient in place
    for key in ('C10', 'C11', 'S11'):
        value = getattr(self, key)
        value += getattr(temp, key)
        setattr(self, key, value)
    return self
def subtract(self, temp):
    """
    Subtract one geocenter object from another

    Arguments
    ---------
    temp: geocenter object to be subtracted
    """
    #-- remove each degree-1 coefficient in place
    for key in ('C10', 'C11', 'S11'):
        value = getattr(self, key)
        value -= getattr(temp, key)
        setattr(self, key, value)
    return self
def multiply(self, temp):
"""
Multiply two geocenter objects
Arguments
---------
temp: geocenter object to be multiplied
"""
| |
<reponame>bp/resqpy
"""vdb.py: Module providing functions for reading from VDB datasets."""
version = '15th March 2021'
# Nexus is a registered trademark of the Halliburton Company
import logging
log = logging.getLogger(__name__)
import glob
import os
import zipfile as zf
from struct import unpack
import numpy as np
import resqpy.olio.grid_functions as gf
import resqpy.olio.xml_et as rqet
null_uint32 = 4294967295  # -1 if interpreted as int32; used as a null marker in vdb structures

key_dict = {  # vdb key character mapping to: (numpy_dtype, size_in_bytes, unpack_format_ch)
    'R': ('float32', 4, 'f'),
    'D': ('float64', 8, 'd'),
    # 'D': ('int64', 8, 'i'),
    'I': ('int32', 4, 'i'),
    'C': (None, 1, 'c'),  # could map to numpy 'byte' but seems to be used for strings
    'P': ('uint32', 4, 'I'),
    'K': (None, 8, 'c'),  # don't store in numpy format; 8 character strings
    'X': (None, 0, 'c')
}  # 'X' used for invalid code character (non-ascii)

# dataset names that are initially treated as not packed
# (presumably exempt from bitmask unpacking -- TODO confirm usage elsewhere in the module)
init_not_packed = ['DAD', 'KID', 'UID', 'UNPACK']
def coerce(a, dtype):
    """Returns a version of numpy array a with elements coerced to dtype.

    :meta private:
    """
    if dtype is None or a.dtype == dtype:
        # nothing to convert; hand back the original array unchanged
        return a
    converted = np.empty(a.shape, dtype = dtype)
    converted[:] = a
    return converted
def ensemble_vdb_list(run_dir, sort_list = True):
    """Returns a sorted list of vdb paths found in the directory tree under run_dir."""

    ensemble_list = []

    def recursive_vdb_list(dir):
        # Walk the directory tree collecting *.vdb and *.vdb.zip directories;
        # recursion does not descend into vdb directories themselves.
        nonlocal ensemble_list
        for entry in os.scandir(dir):
            if not entry.is_dir():
                continue
            if entry.name.endswith('.vdb') or entry.name.endswith('.vdb.zip'):
                ensemble_list.append(entry.path)
                continue
            elif entry.name.endswith('.rst'):
                continue  # optimisation
            recursive_vdb_list(entry.path)

    def cmp_to_key(mycmp):
        """Convert a cmp= function into a key= function."""
        # Local copy of the classic functools.cmp_to_key recipe; the stdlib
        # version could be used instead.

        class K:

            def __init__(self, obj, *args):
                self.obj = obj

            def __lt__(self, other):
                return mycmp(self.obj, other.obj) < 0

            def __gt__(self, other):
                return mycmp(self.obj, other.obj) > 0

            def __eq__(self, other):
                return mycmp(self.obj, other.obj) == 0

            def __le__(self, other):
                return mycmp(self.obj, other.obj) <= 0

            def __ge__(self, other):
                return mycmp(self.obj, other.obj) >= 0

            def __ne__(self, other):
                return mycmp(self.obj, other.obj) != 0

        return K

    def comparison(a, b):
        """Comparison function for a pair of case names."""
        # Natural-sort style compare: a leading non-digit chunk is compared
        # case-insensitively, then a digit chunk numerically (so "case2"
        # sorts before "case10"), with a whole-string lowercase comparison
        # as the final tie-break.
        pa = 0
        pb = 0
        while True:
            sa = pa
            sb = pb
            if pa >= len(a) and pb >= len(b):
                return 0
            # advance over the non-digit prefix of each name
            while pa < len(a) and not a[pa].isdigit():
                pa += 1
            while pb < len(b) and not b[pb].isdigit():
                pb += 1
            if a[sa:pa].lower() < b[sb:pb].lower():
                return -1
            if a[sa:pa].lower() > b[sb:pb].lower():
                return 1
            if pa >= len(a) and pb >= len(b):
                return 0
            if pa >= len(a):
                return -1
            if pb >= len(b):
                return 1
            sa = pa
            sb = pb
            # advance over the digit run and compare numerically
            while pa < len(a) and a[pa].isdigit():
                pa += 1
            while pb < len(b) and b[pb].isdigit():
                pb += 1
            ia = int(a[sa:pa])
            ib = int(b[sb:pb])
            if ia < ib:
                return -1
            if ia > ib:
                return 1
            # NOTE(review): indentation of the tail below was ambiguous in the
            # original source; as placed here the lowercase tie-break ends the
            # loop on its first pass, so any chunks after the first
            # text+number pair are compared lexicographically rather than
            # naturally -- TODO confirm against upstream resqpy
            a_low = a.lower()
            b_low = b.lower()
            if a_low < b_low:
                return -1
            if a_low > b_low:
                return 1
            return 0

    recursive_vdb_list(run_dir)
    if sort_list:
        sorted_list = sorted(ensemble_list, key = cmp_to_key(comparison))
        ensemble_list = sorted_list
    return ensemble_list
class Header():
    """Internal class for handling a Header record in a vdb file."""

    def __init__(self, fp, place):
        """Creates a new Header record object by reading 22 bytes at file offset place."""
        fp.seek(place)
        raw = fp.read(22)
        fields = unpack('=IIcBIII', raw)
        self.previous = fields[0]
        self.next = fields[1]
        type_char = fields[2]
        self.bytes_per_item = fields[3]
        self.number_of_items = fields[4]
        self.first_fragment = fields[5]
        self.max_items = fields[6]
        try:
            self.item_type = type_char.decode()
        except Exception:
            self.item_type = 'X'  # non-ascii type character!
        # payload begins immediately after the 22 byte header
        self.data_place = place + 22
class FragmentHeader():
    """Internal class for handling a Fragment Header record in a vdb file."""

    def __init__(self, fp, place):
        """Creates a new Fragment Header record object by reading 8 bytes at file offset place."""
        fp.seek(place)
        self.next, self.number_of_items = unpack('=II', fp.read(8))
        # fragment payload follows the 8 byte header
        self.data_place = place + 8
class RawData():
    """Internal class for handling Raw Data records in a vdb file."""

    def __init__(self, fp, place, item_type, count, max_count):
        """Creates a new Raw Data record object.

        arguments:
           fp: the open vdb file
           place (int): file offset of the start of the raw data
           item_type (str): key character identifying the data type (see key_dict)
           count (int): number of items to read (capped at max_count)
           max_count (int or None): cap on count, if not None

        note: numeric data is left in self.a (numpy array); character data in self.c
        """
        if max_count is not None and count > max_count:
            count = max_count
        self.a = None
        self.c = None
        dtype, byte_size, form_ch = key_dict[item_type]
        fp.seek(place)
        if dtype is None:
            if form_ch == 'c':
                chars = fp.read(count * byte_size)
                if item_type == 'C':
                    # 'C' data is one character per item; treat the whole run as a string
                    self.c = chars.decode()
                else:
                    # 'K' (or 'X'): list of fixed width strings, upper-cased and stripped
                    self.c = []
                    for i in range(count):
                        self.c.append(chars[i * byte_size:(i + 1) * byte_size].decode().strip().upper())
            else:  # shouldn't come into play
                block = fp.read(count * byte_size)
                form = '=' + str(count) + form_ch
                self.c = unpack(form, block)
        elif dtype == 'float64':  # try tentative 32 bit word swap
            b = fp.read(count * 8)
            nda = np.ndarray((count, 2), dtype = 'int32', buffer = b)
            self.a = np.empty((count, 2), dtype = int)
            self.a[:, :] = nda[:, :]
        else:
            # np.frombuffer replaces the deprecated (and since removed) assignment to
            # ndarray.data; copy() gives the array its own writable memory
            self.a = np.frombuffer(fp.read(count * byte_size), dtype = dtype).copy()
class Data():
    """Internal class for handling Data records in a vdb file."""

    def __init__(self, fp, header):
        """Creates a new Data object.

        arguments:
           fp: the open vdb file
           header (Header or None): header describing the data; None yields an empty Data object

        note: numeric data ends up in self.a (numpy array); character data in self.c
        """
        if header is None:
            self.a = None
            self.c = None
        else:
            raw = RawData(fp, header.data_place, header.item_type, header.number_of_items, header.max_items)
            # heuristic: a single int32 value followed by another header appears to be a
            # wrapper record — skip the integer and read the following header's data instead
            # NOTE(review): empirically derived behaviour (original author: 'making this up as I go along')
            if raw is not None and raw.a is not None and raw.a.size == 1 and raw.a.dtype == 'int32' and header.next != null_uint32:
                next_head = Header(fp, header.next)
                raw = RawData(fp, next_head.data_place, next_head.item_type, next_head.number_of_items,
                              next_head.max_items)
                header = next_head
            self.a = raw.a
            self.c = raw.c
            # data may continue in a chain of fragment records
            if header.first_fragment != null_uint32:
                chain = FragmentChain(fp, header.first_fragment, header)
                if self.a is not None:
                    self.a = np.append(self.a, chain.a)
                if self.c is not None:
                    self.c += chain.c
class Fragment():
    """Internal class for handling an individual Fragment record in a vdb file."""

    def __init__(self, fp, place, header):
        """Creates a new Fragment record object; item type and cap are taken from the chain's header."""
        self.head = FragmentHeader(fp, place)
        item_count = self.head.number_of_items
        if item_count == 0:
            self.c = None
            self.a = None
        else:
            raw = RawData(fp, self.head.data_place, header.item_type, item_count, header.max_items)
            self.c = raw.c
            self.a = raw.a
class FragmentChain():
    """Internal class for handling a chain of Fragment records in a vdb file."""

    def __init__(self, fp, place, header):
        """Creates a new Fragment Chain object.

        arguments:
           fp: the open vdb file
           place (int): offset of the first fragment; null_uint32 for an empty chain
           header (Header): header giving the item type and max item count

        note: accumulates either numeric data in self.a (1D numpy array) or character
        data in self.c (list of 8 character strings) — never both
        """
        self.a = None
        self.c = None
        while place != null_uint32:
            fragment = Fragment(fp, place, header)
            if fragment.head.number_of_items > 0:
                if fragment.a is not None:
                    if self.a is None:
                        self.a = fragment.a
                    else:
                        # np.concatenate takes the arrays as a sequence; passing them as two
                        # positional args would treat the second as the axis argument
                        self.a = np.concatenate((self.a, fragment.a))
                if fragment.c:
                    if self.c is None:
                        self.c = []
                    if isinstance(fragment.c, str):
                        self.c.append(fragment.c)
                    elif isinstance(fragment.c, list):
                        for item in fragment.c:
                            if item.isascii():
                                self.c.append(item)
            place = fragment.head.next
        assert self.c is None or self.a is None, 'mixture of character and numeric data in fragment chain'
        # todo: check number of elements matches header info?
class KP():
"""Internal class for a (Key, Pointer) record in a vdb file."""
def __init__(self, fp, place = 4):
"""Creates a new (Key, Pointer) record object."""
# log.debug(f'KP init at place {place}')
self.k_head = Header(fp, place)
assert self.k_head.item_type == 'K', 'did not find expected Key header'
assert self.k_head.bytes_per_item == 8, 'bytes per item not 8 in Key header'
assert self.k_head.number_of_items > 0, 'zero items in Key header'
place = self.k_head.next
assert place != null_uint32, 'no next header in Key header'
self.p_head = Header(fp, place)
assert self.p_head.item_type == 'P', 'did not find | |
"""Tests for core."""
import random
import grafanalib.core as G
import pytest
def dummy_grid_pos() -> G.GridPos:
    """Return a small fixed GridPos fixture for panel construction tests."""
    return G.GridPos(h=1, w=2, x=3, y=4)
def dummy_data_link() -> G.DataLink:
    """Return a fixed DataLink fixture that opens in a new tab."""
    return G.DataLink(
        title='dummy title',
        linkUrl='https://www.dummy-link-url.com',
        isNewTab=True
    )
def dummy_evaluator() -> G.Evaluator:
    """Return a fixed greater-than Evaluator fixture."""
    return G.Evaluator(
        type=G.EVAL_GT,
        params=42
    )
def dummy_alert_condition() -> G.AlertCondition:
    """Return a fixed AlertCondition fixture: avg > 42 over the last 5m."""
    return G.AlertCondition(
        target=G.Target(),
        evaluator=G.Evaluator(
            type=G.EVAL_GT,
            params=42),
        timeRange=G.TimeRange(
            from_time='5m',
            to_time='now'
        ),
        operator=G.OP_AND,
        reducerType=G.RTYPE_AVG,
    )
def test_template_defaults():
    """An interval template's default selection becomes the current text/value."""
    template = G.Template(
        name='test',
        query='1m,5m,10m,30m,1h,3h,12h,1d',
        type='interval',
        default='1m',
    )
    current = template.to_json_data()['current']
    assert current['text'] == '1m'
    assert current['value'] == '1m'
def test_custom_template_ok():
    """A custom template builds one option per comma-separated query value."""
    template = G.Template(
        name='test',
        query='1,2,3',
        default='1',
        type='custom',
    )
    payload = template.to_json_data()
    assert len(payload['options']) == 3
    assert payload['current']['text'] == '1'
    assert payload['current']['value'] == '1'
def test_custom_template_dont_override_options():
    """Explicitly supplied options survive and drive the current selection."""
    supplied_options = [
        {"value": '1', "selected": True, "text": 'some text 1'},
        {"value": '2', "selected": False, "text": 'some text 2'},
        {"value": '3', "selected": False, "text": 'some text 3'},
    ]
    template = G.Template(
        name='test',
        query='1,2,3',
        default='1',
        options=supplied_options,
        type='custom',
    )
    payload = template.to_json_data()
    assert len(payload['options']) == 3
    assert payload['current']['text'] == 'some text 1'
    assert payload['current']['value'] == '1'
def test_table():
    """Table panels serialise their transformations in order."""
    transforms = [
        {"id": "seriesToRows", "options": {}},
        {
            "id": "organize",
            "options": {
                "excludeByName": {"Time": True},
                "indexByName": {},
                "renameByName": {"Value": "Dummy"},
            },
        },
    ]
    table = G.Table(
        dataSource='some data source',
        targets=[G.Target(expr='some expr')],
        title='table title',
        transformations=transforms,
    )
    payload = table.to_json_data()
    assert len(payload['transformations']) == 2
    assert payload['transformations'][0]["id"] == "seriesToRows"
def test_stat_no_repeat():
    """Without a Repeat, the repeat-related fields serialise as None."""
    stat = G.Stat(
        title='dummy',
        dataSource='data source',
        targets=[G.Target(expr='some expr')],
    )
    payload = stat.to_json_data()
    assert payload['repeat'] is None
    assert payload['repeatDirection'] is None
    assert payload['maxPerRow'] is None
def test_DiscreteColorMappingItem_exception_checks():
    """Non-string text, or a non-string color, is rejected with TypeError."""
    with pytest.raises(TypeError):
        G.DiscreteColorMappingItem(123)
    with pytest.raises(TypeError):
        G.DiscreteColorMappingItem("foo", color=123)
def test_DiscreteColorMappingItem():
    """Mapping items default to GREY1 and honour an explicit color."""
    default_item = G.DiscreteColorMappingItem('foo').to_json_data()
    assert default_item['text'] == 'foo'
    assert default_item['color'] == G.GREY1
    colored_item = G.DiscreteColorMappingItem('foo', color='bar').to_json_data()
    assert colored_item['text'] == 'foo'
    assert colored_item['color'] == 'bar'
def test_Discrete_exceptions():
    """Invalid Discrete panel arguments raise ValueError / TypeError."""
    with pytest.raises(ValueError):
        G.Discrete(legendSortBy='foo')
    with pytest.raises(TypeError):
        G.Discrete(rangeMaps=[123, 456])
    with pytest.raises(TypeError):
        G.Discrete(valueMaps=['foo', 'bar'])
    with pytest.raises(TypeError):
        G.Discrete(lineColor=123)
    with pytest.raises(TypeError):
        G.Discrete(highlightOnMouseover=123)
def test_Discrete():
    """Discrete panel serialisation: explicit settings plus documented defaults."""
    color_map = [
        G.DiscreteColorMappingItem('bar', color='baz'),
        G.DiscreteColorMappingItem('foz', color='faz')
    ]
    panel = G.Discrete(
        title='foo',
        colorMaps=color_map,
        lineColor='#aabbcc',
        metricNameColor=G.RGBA(1, 2, 3, .5),
        decimals=123,
        highlightOnMouseover=False,
        showDistinctCount=True,
        showLegendCounts=False,
    )
    payload = panel.to_json_data()
    expected_values = {
        'colorMaps': color_map,
        'title': 'foo',
        'type': G.DISCRETE_TYPE,
        'rangeMaps': [],
        'valueMaps': [],
        'backgroundColor': G.RGBA(128, 128, 128, 0.1),
        'lineColor': '#aabbcc',
        'metricNameColor': G.RGBA(1, 2, 3, .5),
        'timeTextColor': "#d8d9da",
        'valueTextColor': "#000000",
        'decimals': 123,
        'legendPercentDecimals': 0,
        'rowHeight': 50,
        'textSize': 24,
        'textSizeTime': 12,
        'showLegendTime': None,
        'showTransitionCount': None,
    }
    for key, value in expected_values.items():
        assert payload[key] == value
    expected_flags = {
        'highlightOnMouseover': False,
        'showLegend': True,
        'showLegendPercent': True,
        'showLegendNames': True,
        'showLegendValues': True,
        'showTimeAxis': True,
        'use12HourClock': False,
        'writeMetricNames': False,
        'writeLastValue': True,
        'writeAllValues': False,
        'showDistinctCount': True,
        'showLegendCounts': False,
    }
    for key, value in expected_flags.items():
        assert payload[key] is value
def test_StatValueMappings_exception_checks():
    """Positional mapping arguments must be StatValueMappingItem instances."""
    with pytest.raises(TypeError):
        G.StatValueMappings(
            G.StatValueMappingItem('foo', '0', 'dark-red'),
            "not of type StatValueMappingItem",
        )
def test_StatValueMappings():
    """Value mappings serialise each item under its (string) value key."""
    mappings = G.StatValueMappings(
        G.StatValueMappingItem('foo', '0', 'dark-red'),  # value must be a string
        G.StatValueMappingItem('bar', '1', 'purple'),
    )
    payload = mappings.to_json_data()
    assert payload['type'] == 'value'
    expected = {
        '0': ('foo', 'dark-red'),
        '1': ('bar', 'purple'),
    }
    for key, (text, color) in expected.items():
        assert payload['options'][key]['text'] == text
        assert payload['options'][key]['color'] == color
def test_StatRangeMappings():
    """Range mappings serialise start/end plus the result text and color."""
    mapping = G.StatRangeMappings(
        'dummy_text',
        startValue=10,
        endValue=20,
        color='dark-red'
    )
    options = mapping.to_json_data()['options']
    assert mapping.to_json_data()['type'] == 'range'
    assert options['from'] == 10
    assert options['to'] == 20
    assert options['result']['text'] == 'dummy_text'
    assert options['result']['color'] == 'dark-red'
def test_StatMapping():
    """StatMapping serialises its text plus from/to boundaries."""
    payload = G.StatMapping(
        'dummy_text',
        startValue='foo',
        endValue='bar',
    ).to_json_data()
    assert payload['text'] == 'dummy_text'
    assert payload['from'] == 'foo'
    assert payload['to'] == 'bar'
def test_stat_with_repeat():
    """A Repeat object flattens into repeat/repeatDirection/maxPerRow."""
    stat = G.Stat(
        title='dummy',
        dataSource='data source',
        targets=[G.Target(expr='some expr')],
        repeat=G.Repeat(
            variable="repetitionVariable",
            direction='h',
            maxPerRow=10
        )
    )
    payload = stat.to_json_data()
    assert payload['repeat'] == 'repetitionVariable'
    assert payload['repeatDirection'] == 'h'
    assert payload['maxPerRow'] == 10
def test_single_stat():
    """SingleStat passes its positional dataSource/targets/title through."""
    source = 'dummy data source'
    queries = ['dummy_prom_query']
    heading = 'dummy title'
    payload = G.SingleStat(source, queries, heading).to_json_data()
    assert payload['targets'] == queries
    assert payload['datasource'] == source
    assert payload['title'] == heading
def test_dashboard_list():
    """DashboardList defaults: no targets, no datasource, starred on."""
    heading = 'dummy title'
    payload = G.DashboardList(title=heading).to_json_data()
    assert payload['targets'] == []
    assert payload['datasource'] is None
    assert payload['title'] == heading
    assert payload['starred'] is True
def test_logs_panel():
    """Logs panel: pass-through fields plus documented option defaults."""
    source = 'dummy data source'
    queries = ['dummy_prom_query']
    heading = 'dummy title'
    payload = G.Logs(source, queries, heading).to_json_data()
    assert payload['targets'] == queries
    assert payload['datasource'] == source
    assert payload['title'] == heading
    options = payload['options']
    for flag in ('showLabels', 'showCommonLabels', 'showTime', 'wrapLogMessage',
                 'enableLogDetails', 'prettifyLogMessage'):
        assert options[flag] is False
    assert options['sortOrder'] == 'Descending'
    assert options['dedupStrategy'] == 'none'
def test_notification():
    """Notification serialises its uid."""
    channel_uid = 'notification_channel'
    payload = G.Notification(channel_uid).to_json_data()
    assert payload['uid'] == channel_uid
def test_graph_panel():
    """A graph without an alert serialises basics and omits the alert key."""
    source = 'dummy data source'
    queries = ['dummy_prom_query']
    heading = 'dummy title'
    payload = G.Graph(source, queries, heading).to_json_data()
    assert payload['targets'] == queries
    assert payload['datasource'] == source
    assert payload['title'] == heading
    assert 'alert' not in payload
def test_panel_extra_json():
    """extraJson deep-updates the serialised panel, including nested dicts."""
    source = 'dummy data source'
    queries = ['dummy_prom_query']
    heading = 'dummy title'
    extra = {
        'fillGradient': 6,
        'yaxis': {'align': True},
        'legend': {'avg': True},
    }
    payload = G.Graph(source, queries, heading, extraJson=extra).to_json_data()
    assert payload['targets'] == queries
    assert payload['datasource'] == source
    assert payload['title'] == heading
    assert 'alert' not in payload
    assert payload['fillGradient'] == 6
    assert payload['yaxis']['align'] is True
    # nested non-dict members must survive the deep update alongside the new key
    assert payload['legend']['max'] is False
    assert payload['legend']['avg'] is True
def test_graph_panel_threshold():
    """Thresholds serialise as given when the graph carries no alert."""
    source = 'dummy data source'
    queries = ['dummy_prom_query']
    heading = 'dummy title'
    thresholds = [
        G.GraphThreshold(20.0),
        G.GraphThreshold(40.2, colorMode="ok")
    ]
    payload = G.Graph(source, queries, heading, thresholds=thresholds).to_json_data()
    assert payload['targets'] == queries
    assert payload['datasource'] == source
    assert payload['title'] == heading
    assert 'alert' not in payload
    assert payload['thresholds'] == thresholds
def test_graph_panel_alert():
    """When an alert is present, thresholds are suppressed in favour of it."""
    source = 'dummy data source'
    queries = ['dummy_prom_query']
    heading = 'dummy title'
    alert = [
        G.AlertCondition(G.Target(), G.Evaluator('a', 'b'), G.TimeRange('5', '6'), 'd', 'e')
    ]
    thresholds = [
        G.GraphThreshold(20.0),
        G.GraphThreshold(40.2, colorMode="ok")
    ]
    payload = G.Graph(source, queries, heading, thresholds=thresholds, alert=alert).to_json_data()
    assert payload['targets'] == queries
    assert payload['datasource'] == source
    assert payload['title'] == heading
    assert payload['alert'] == alert
    assert payload['thresholds'] == []
def test_graph_threshold():
    """GraphThreshold defaults: fill/line on, GT operator, no custom colors."""
    payload = G.GraphThreshold(20.0, colorMode="ok").to_json_data()
    assert payload['value'] == 20.0
    assert payload['colorMode'] == "ok"
    assert payload['fill'] is True
    assert payload['line'] is True
    assert payload['op'] == G.EVAL_GT
    assert 'fillColor' not in payload
    assert 'lineColor' not in payload
def test_graph_threshold_custom():
    """Custom colorMode exposes the given fillColor and the default lineColor."""
    fill = G.GREEN
    payload = G.GraphThreshold(20.0, colorMode="custom", fillColor=fill).to_json_data()
    assert payload['value'] == 20.0
    assert payload['colorMode'] == "custom"
    assert payload['fill'] is True
    assert payload['line'] is True
    assert payload['op'] == G.EVAL_GT
    assert payload['fillColor'] == fill
    assert payload['lineColor'] == G.RED
def test_alert_list():
    """A fully populated AlertList serialises without raising."""
    panel = G.AlertList(
        dashboardTags=['dummy tag'],
        description='dummy description',
        gridPos=dummy_grid_pos(),
        id=random.randint(1, 10),
        links=[dummy_data_link(), dummy_data_link()],
        nameFilter='dummy name filter',
        stateFilter=[G.ALERTLIST_STATE_ALERTING, G.ALERTLIST_STATE_OK],
        title='dummy title'
    )
    panel.to_json_data()
def test_alert():
    """A fully populated Alert serialises without raising."""
    G.Alert(
        name='dummy name',
        message='dummy message',
        alertConditions=dummy_alert_condition(),
        alertRuleTags=dict(alert_rul_dummy_key='alert rul dummy value')
    ).to_json_data()
def test_worldmap():
    """Worldmap serialises basics plus the circleMaxSize override."""
    source = 'dummy data source'
    queries = ['dummy_prom_query']
    heading = 'dummy title'
    payload = G.Worldmap(source, queries, heading, circleMaxSize=11).to_json_data()
    assert payload['targets'] == queries
    assert payload['datasource'] == source
    assert payload['title'] == heading
    assert payload['circleMaxSize'] == 11
def test_stateTimeline():
    """StateTimeline serialises basics plus the rowHeight option."""
    source = 'dummy data source'
    queries = ['dummy_prom_query']
    heading = 'dummy title'
    payload = G.StateTimeline(source, queries, heading, rowHeight=0.7).to_json_data()
    assert payload['targets'] == queries
    assert payload['datasource'] == source
    assert payload['title'] == heading
    assert payload['options']['rowHeight'] == 0.7
def test_timeseries():
    """TimeSeries serialises basics with no field overrides by default."""
    source = 'dummy data source'
    queries = ['dummy_prom_query']
    heading = 'dummy title'
    payload = G.TimeSeries(source, queries, heading).to_json_data()
    assert payload['targets'] == queries
    assert payload['datasource'] == source
    assert payload['title'] == heading
    assert payload['fieldConfig']['overrides'] == []
def test_timeseries_with_overrides():
    """Field overrides pass through to fieldConfig unchanged."""
    source = 'dummy data source'
    queries = ['dummy_prom_query']
    heading = 'dummy title'
    overrides = [
        {
            "matcher": {"id": "byName", "options": "min"},
            "properties": [
                {"id": "custom.fillBelowTo", "value": "min"},
                {"id": "custom.lineWidth", "value": 0},
            ],
        }
    ]
    panel = G.TimeSeries(
        dataSource=source,
        targets=queries,
        title=heading,
        overrides=overrides,
    )
    payload = panel.to_json_data()
    assert payload['targets'] == queries
    assert payload['datasource'] == source
    assert payload['title'] == heading
    assert payload['fieldConfig']['overrides'] == overrides
def test_news():
    """News panel carries its feed URL in options."""
    heading = 'dummy title'
    feed = "www.example.com"
    payload = G.News(title=heading, feedUrl=feed).to_json_data()
    assert payload['options']['feedUrl'] == feed
    assert payload['title'] == heading
def test_pieChartv2():
data_source = 'dummy data source'
targets = ['dummy_prom_query']
title = 'dummy title'
pie = G.PieChartv2(data_source, targets, title)
data = pie.to_json_data()
assert data['targets'] == targets
assert data['datasource'] == data_source
assert | |
<gh_stars>0
from __future__ import annotations
from typing import Dict, Iterable, List, Optional, Tuple, TYPE_CHECKING
from random import choice, choices, randint, uniform, random
from itertools import accumulate
from coords import Coords
from data_globals import PLANET_NEUTRAL, PLANET_BARREN, PLANET_BOMBED_OUT, PLANET_FRIENDLY, PLANET_PREWARP, PLANET_RELATIONS, PLANET_TYPES, PLANET_WARP_CAPABLE, STATUS_ACTIVE, STATUS_CLOAK_COMPRIMISED, STATUS_CLOAKED, STATUS_DERLICT, STATUS_HULK, STATUS_OBLITERATED, PlanetHabitation, PlanetRelation, PLANET_RELATION_DICT
import colors
from nation import Nation
from torpedo import ALL_TORPEDO_TYPES, Torpedo
if TYPE_CHECKING:
from game_data import GameData
from message_log import MessageLog
from starship import Starship
# cumulative weights for drawing how many stars (0..5) a sub-sector contains
star_number_weights = tuple(accumulate((5, 12, 20, 9, 6, 3)))
star_number_weights_len = len(star_number_weights)
class CanDockWith:
    """Interface for objects a starship may dock with; subclasses must implement both members."""

    def can_dock_with(self, starship:Starship, require_adjacent:bool=True):
        """Return whether the given starship may dock here (subclass responsibility)."""
        raise NotImplementedError

    @property
    def get_dock_repair_factor(self):
        """Repair rate factor granted while docked (subclass responsibility)."""
        raise NotImplementedError
class InterstellerObject:
    """Base class for objects (stars, planets) occupying a cell of a sub-sector."""

    def __init__(self, local_coords:Coords, sector_coords:Coords, system:SubSector) -> None:
        self.local_coords = local_coords  # position within the sub-sector grid
        self.sector_coords = sector_coords  # position of the sub-sector within the sector grid
        self.system = system  # owning SubSector

    def hit_by_torpedo(self, is_player:bool, game_data:GameData, message_log:MessageLog, torpedo:Torpedo):
        """React to a torpedo hit (subclass responsibility)."""
        raise NotImplementedError
# star classifications; entries correspond positionally to STAR_WEIGHTS below
STAR_TYPES = (
    "Main sequence O",
    "Main sequence B",
    "Main sequence A",
    "Main sequence F",
    "Main sequence G (Yellow dwarf)",
    "Main sequence K (Orange dwarf)",
    "Main sequence M (Red dwarf)",
    "Brown dwarf",
    "Brown subdwarf",
    "Blue subdwarf",
    "Blue giant",
    "Blue supergiant",
    "Blue hypergiant",
    "Yellow giant",
    "Yellow supergiant",
    "Yellow hypergiant",
    "Red giant",
    "Red supergiant",
    "Red hypergiant",
    "White dwarf",
    "Neutron star",
    "Black hole"
)

# cumulative frequency weights for random.choices over STAR_TYPES (one per type, in order)
STAR_WEIGHTS = tuple(
    accumulate(
        (
            6,
            48,
            118,
            325,
            731,
            1646,
            5730,
            1024,
            1237,
            33,
            24,
            11,
            4,
            12,
            6,
            2,
            51,
            26,
            8,
            34,
            9,
            3,
        )
    )
)

# display colour for each star classification
STAR_COLORS = {
    "Main sequence O" : colors.star_blue,
    "Main sequence B" : colors.star_blue_white,
    "Main sequence A" : colors.star_white,
    "Main sequence F" : colors.star_yellow_white,
    "Main sequence G (Yellow dwarf)" : colors.star_yellow,
    "Main sequence K (Orange dwarf)" : colors.star_orange,
    "Main sequence M (Red dwarf)" : colors.star_red,
    "Brown dwarf" : colors.star_brown,
    "Brown subdwarf" : colors.star_brown,
    "Blue subdwarf" : colors.star_blue,
    "Blue giant" : colors.star_blue,
    "Blue supergiant" : colors.star_blue,
    "Blue hypergiant" : colors.star_blue,
    "Yellow giant" : colors.star_yellow,
    "Yellow supergiant" : colors.star_yellow,
    "Yellow hypergiant" : colors.star_yellow,
    "Red giant" : colors.star_red,
    "Red supergiant" : colors.star_red,
    "Red hypergiant" : colors.star_red,
    "White dwarf" : colors.star_white,
    "Neutron star" : colors.star_white,
    "Black hole" : colors.black
}
'''
STAR_COLORS = (
colors.star_blue,
colors.star_blue_white,
colors.star_white,
colors.star_yellow_white,
colors.star_yellow,
colors.star_orange,
colors.star_red,
colors.star_brown,
colors.black
)
'''
class Star(InterstellerObject):
    """A star occupying one cell of a sub-sector, with a randomly drawn classification."""

    orderSuffexes = ['Alpha ', 'Beta ', 'Gamma ', 'Delta ']
    planetSuffexes = ['', ' I', ' II', ' III', ' IV', ' V', ' VI', ' VII', ' VIII']

    def __init__(self, local_coords:Coords, sector_coords:Coords, system:SubSector):
        """Draw a star type from the cumulative frequency weights and derive its colours."""
        super().__init__(local_coords, sector_coords, system)
        self.name = choices(
            STAR_TYPES,
            cum_weights=STAR_WEIGHTS
        )[0]
        self.color = STAR_COLORS[self.name]
        # black holes render as white-on-black; every other star is drawn on black
        self.bg = colors.white if self.color is colors.black else colors.black

    def hit_by_torpedo(self, is_player:bool, game_data:GameData, message_log:MessageLog, torpedo:Torpedo):
        """Stars are unaffected by torpedo hits."""
        pass
class SubSectorInfo:
    """Per-sub-sector summary counters as known to one side (player or enemy)."""

    def __init__(self, x:int, y:int) -> None:
        self.coords = Coords(x=x, y=y)
        self.total_stars = 0
        self.planets_dict:Dict[Coords, Planet] = {}
        # planet tallies are relative to the ship in question, not necessarily the player
        self.friendly_planets = 0
        self.neutral_planets = 0
        self.unfriendly_planets = 0
        self.barren_planets = 0
        self.objectives = 0
        # ship tallies are relative to the ship in question, not necessarily the player
        self.hostile_ships = 0
        self.allied_ships = 0
        self.derelicts = 0
        # dirty flags: set when the corresponding counts must be recomputed
        self.planet_count_needs_updating = True
        self.ship_count_needs_updating = True

    @property
    def needs_updating(self):
        """True when either the planet or the ship counts are stale."""
        return self.planet_count_needs_updating or self.ship_count_needs_updating
class SubSector:
"""A SubSector is a region of space that contains stars and planets.
Returns:
[type]: [description]
Yields:
[type]: [description]
"""
@staticmethod
def __grab_random_and_remove(rand_list:List[Tuple[int,int]]):
r = choice(rand_list)
rand_list.remove(r)
return r
@staticmethod
def __gen_safe_spot_list(x_range:range, y_range:range):
for y in y_range:
for x in x_range:
yield Coords(x=x,y=y)
    def __init__(self, gd:GameData, x:int, y:int):
        """Set up an empty sub-sector at sector coordinates (x, y).

        arguments:
           gd: the top level GameData object this sub-sector belongs to
           x, y: this sub-sector's coordinates within the sector grid
        """
        #self.astroObjects = [Sector.__buildSlice(gd.subsec_size_x) for s in gd.subsec_size_range_y]
        # every cell starts out 'safe' (no star or planet); cells are removed as objects are placed
        self.safe_spots = list(SubSector.__gen_safe_spot_list(gd.subsec_size_range_x, gd.subsec_size_range_y))
        self.coords = Coords(x=x,y=y)
        #self.x = x
        #self.y = y
        #print(stars)
        self.game_data = gd
        self.stars_dict:Dict[Coords, Star] = {}
        self.total_stars = 0
        #self.planets = []
        self.planets_dict:Dict[Coords, Planet] = {}
        # planet tallies (recomputed by count_planets); relative to each side
        self.friendly_planets = 0
        self.neutral_planets = 0
        self.unfriendly_planets = 0
        self.barren_planets = 0
        self.planets_friendly_to_player = 0
        self.planets_neutral_to_player = 0
        self.planets_hostile_to_player = 0
        self.planets_friendly_to_enemy = 0
        self.planets_neutral_to_enemy = 0
        self.planets_hostile_to_enemy = 0
        # True while the player's ship is located in this sub-sector
        self.player_present = False
@property
def get_player_subsector_info(self):
return self.game_data.player_subsector_info[self.coords.y][self.coords.x]
@property
def get_enemy_subsector_info(self):
return self.game_data.enemy_subsector_info[self.coords.y][self.coords.x]
    def random_setup(self, star_number_weights:Iterable[int], star_number_weights_len:int):
        """Randomly populate this sub-sector with stars and planets.

        arguments:
           star_number_weights: cumulative weights for drawing the number of stars
           star_number_weights_len: the number of entries in star_number_weights

        note: more stars allow more planets; planets' relations towards player and enemy
        are drawn independently, and planets with no disposition towards warp capable
        civilisations are always hostile to both sides
        """
        stars = choices(range(star_number_weights_len), cum_weights=star_number_weights)[0]
        for i in range(stars):
            # NOTE(review): assumes a Coords instance unpacks into (x, y) — confirm against Coords
            x, y = SubSector.__grab_random_and_remove(self.safe_spots)
            xy = Coords(x=x,y=y)
            #self.astroObjects[y][x] = '*'
            self.total_stars+=1
            self.stars_dict[xy] = Star(
                local_coords=xy, sector_coords=self.coords, system=self
            )
        number_of_stars = self.number_of_stars
        if number_of_stars > 0:
            # planet count range scales with the number of stars present
            if number_of_stars == 1:
                number_of_planets = randint(0, 4)
            elif number_of_stars == 2:
                number_of_planets = randint(1, 6)
            else:
                number_of_planets = randint(2, 8)
            for p in range(number_of_planets):
                x,y = SubSector.__grab_random_and_remove(self.safe_spots)
                local_coords = Coords(x=x, y=y)
                planet_habbitation = choice(PLANET_TYPES)
                has_disposition_towards_warp_capiable_civs = planet_habbitation.has_disposition_towards_warp_capiable_civs
                # relations drawn only for planets that care about warp capable civilisations
                player_planet_relation, enemy_planet_relation = (
                    choice(PLANET_RELATIONS), choice(PLANET_RELATIONS)
                ) if has_disposition_towards_warp_capiable_civs else (
                    PlanetRelation.HOSTILE, PlanetRelation.HOSTILE
                )
                p = Planet(
                    planet_habbitation=planet_habbitation,
                    player_planet_relation = player_planet_relation,
                    enemy_planet_relation = enemy_planet_relation,
                    local_coords = local_coords, sector_coords=self.coords,
                    system=self
                )
                self.planets_dict[local_coords] = p
                # tallies here are relative to the player
                if has_disposition_towards_warp_capiable_civs:
                    if player_planet_relation == PlanetRelation.FRIENDLY:
                        self.friendly_planets += 1
                    elif player_planet_relation == PlanetRelation.NEUTRAL:
                        self.neutral_planets += 1
                    else:
                        self.unfriendly_planets += 1
                else:
                    self.unfriendly_planets += 1
def count_planets(self):
planet_habitations_for_player = [
planet.player_display_status for planet in self.planets_dict.values()
]
total_planets = len(planet_habitations_for_player)
self.planets_friendly_to_player = len(
[planet for planet in planet_habitations_for_player if planet == PLANET_FRIENDLY]
)
self.planets_neutral_to_player = len(
[planet for planet in planet_habitations_for_player if planet == PLANET_NEUTRAL]
)
self.planets_hostile_to_player = total_planets - (
self.planets_friendly_to_player + self.planets_neutral_to_player
)
planet_habitations_for_enemy = [
planet.enemy_display_status for planet in self.planets_dict.values()
]
self.planets_friendly_to_enemy = len(
[planet for planet in planet_habitations_for_enemy if planet == PLANET_FRIENDLY]
)
self.planets_neutral_to_enemy = len(
[planet for planet in planet_habitations_for_enemy if planet == PLANET_NEUTRAL]
)
self.planets_hostile_to_enemy = total_planets - (
self.planets_friendly_to_enemy + self.planets_neutral_to_enemy
)
x, y = self.coords.x, self.coords.y
self.game_data.player_subsector_info[y][x].planet_count_needs_updating = True
self.game_data.enemy_subsector_info[y][x].planet_count_needs_updating = True
    @property
    def number_of_planets(self):
        """Number of planets currently placed in this sub-sector."""
        return len(self.planets_dict)
    @property
    def number_of_stars(self):
        """Number of stars currently placed in this sub-sector."""
        return len(self.stars_dict)
def find_random_safe_spot(self, ship_list:Optional[Iterable[Starship]]=None):
if ship_list:
ship_positions = set(
ship.local_coords.create_coords() for ship in ship_list if
ship.sector_coords.x == self.x and ship.sector_coords.y == self.y
)
okay_spots = [c for c in self.safe_spots if c not in ship_positions]
return choice(okay_spots)
return choice(self.safe_spots)
def find_random_safe_spots(self, how_many:int, ship_list:Optional[Iterable[Starship]]=None):
if ship_list:
ship_positions = set(
ship.local_coords.create_coords() for ship in ship_list if ship.sector_coords.x == self.x and
ship.sector_coords.y == self.y
)
okay_spots = [c for c in self.safe_spots if c not in ship_positions]
return choices(okay_spots, k=how_many)
return choices(self.safe_spots, k=how_many)
def destroy_ship(self, ship:Starship):
"""This should be called only when a ship is destroyed
Args:
ship (Starship): The ship in question
"""
player_subsector_info = self.get_player_subsector_info
enemy_subsector_info = self.get_enemy_subsector_info
if ship.ship_status == STATUS_DERLICT:
player_subsector_info.needs_updating = True
enemy_subsector_info.needs_updating = True
elif ship.is_enemy:
player_subsector_info.ship_count_needs_updating = True
enemy_subsector_info.allied_ships -= 1
if ship.is_mission_critical:
enemy_subsector_info.objectives -= 1
else:
if ship is ship.game_data.player:
self.player_present = False
player_subsector_info.allied_ships -= 1
if ship.is_mission_critical:
player_subsector_info.objectives -= 1
enemy_subsector_info.ship_count_needs_updating = True
def disable_ship(self, ship:Starship):
player_subsector_info = self.get_player_subsector_info
enemy_subsector_info = self.get_enemy_subsector_info
player_subsector_info.ship_count_needs_updating = True
enemy_subsector_info.ship_count_needs_updating = True
if ship.is_enemy:
enemy_subsector_info.allied_ships -= 1
if ship.is_mission_critical:
enemy_subsector_info.objectives -= 1
else:
player_subsector_info.allied_ships -= 1
if ship.is_mission_critical:
player_subsector_info.objectives -= 1
def enable_ship(self, ship:Starship):
assert ship.ship_status != STATUS_DERLICT
is_mission_critical = ship.is_mission_critical
enemy_subsector_info = ship.get_sub_sector.get_enemy_subsector_info
player_subsector_info = ship.get_sub_sector.get_player_subsector_info
if ship.is_enemy:
enemy_subsector_info.allied_ships += 1
enemy_subsector_info.derelicts -= 1
| |
# Add devices to database
if iostat_processing and iostat_device_block_processing and iostat_header != "":
if line.strip() != "":
iostat_row_dict = {}
# if European "," for ".", do that first
line = line.replace(",", ".")
# get rid of multiple whitespaces, then use comma separator so the AM/PM is preserved if its there
line = " ".join(line.split())
line = line.replace(" ", ",")
if iostat_date_included:
if iostat_am_pm:
line = (
date_time.split()[0]
+ ","
+ date_time.split()[1]
+ " "
+ date_time.split()[2]
+ ","
+ line
)
else:
line = date_time.split()[0] + "," + str(date_time.split()[1]) + "," + line
values = line.split(",")
values = [i.strip() for i in values] # strip off carriage return etc
values_converted = [get_number_type(v) for v in values]
iostat_row_dict = dict(zip(iostat_columns, values_converted))
iostat_row_dict["html name"] = html_filename
# Check date format
if not iostat_date_convert and iostat_row_dict['Date'] != iostat_date:
iostat_date = iostat_row_dict['Date']
iostat_date_convert = check_date("iostat", run_start_date, iostat_row_dict['Date'])
if iostat_date_convert:
new_date = make_mdy_date(iostat_row_dict["Date"])
new_date_dict = {"Date": new_date}
iostat_row_dict.update(new_date_dict)
# Added for pretty processing
iostat_row_dict["datetime"] = f'{iostat_row_dict["Date"]} {iostat_row_dict["Time"]}'
iostat_rows_list.append(iostat_row_dict)
# Header line found, next line is start of device block
if "Device" in line:
iostat_device_block_processing = True
# First time in create column names
if iostat_processing and iostat_header == "" and "Device" in line:
if iostat_date_included:
iostat_header = f"Date Time {line}"
else:
iostat_header = f"{line}"
iostat_header = iostat_header.replace(":", "") # "Device:" used later on logic
iostat_columns = iostat_header.split()
iostat_columns = [i.strip() for i in iostat_columns] # strip off carriage return etc
# Add each section to the database
if mgstat_header != "":
# Create dataframe of rows. Shortcut here to creating table columns or later charts etc
mgstat_df = pd.DataFrame(mgstat_rows_list)
# Remove any rows with NaN
mgstat_df.dropna(inplace=True)
# Want to just dump a dataframe to a table and avoid all the roll-your-own steps ;)
# SQLAlchemy is included in pandas
#
# conn = sqlite3.connect('SystemPerformance.sqlite')
# mgstat_df.to_sql('mgstat', conn, if_exists='replace', index=False)
#
# else create the table and load data as below
create_generic_table(connection, "mgstat", mgstat_df)
# Add the rows to the table, loop through the list of dictionaries
for row in mgstat_rows_list:
insert_dict_into_table(connection, "mgstat", row)
connection.commit()
if csv_out:
mgstat_output_csv = f"{output_filepath_prefix}mgstat.csv"
# if file does not exist write header
if not os.path.isfile(mgstat_output_csv):
mgstat_df.to_csv(mgstat_output_csv, header='column_names', index=False, encoding='utf-8')
else: # else it exists so append without writing the header
mgstat_df.to_csv(mgstat_output_csv, mode='a', header=False, index=False, encoding='utf-8')
if vmstat_header != "":
vmstat_df = pd.DataFrame(vmstat_rows_list)
vmstat_df.dropna(inplace=True)
create_generic_table(connection, "vmstat", vmstat_df)
for row in vmstat_rows_list:
insert_dict_into_table(connection, "vmstat", row)
connection.commit()
if csv_out:
vmstat_output_csv = f"{output_filepath_prefix}vmstat.csv"
# if file does not exist write header
if not os.path.isfile(vmstat_output_csv):
vmstat_df.to_csv(vmstat_output_csv, header='column_names', index=False, encoding='utf-8')
else: # else it exists so append without writing the header
vmstat_df.to_csv(vmstat_output_csv, mode='a', header=False, index=False, encoding='utf-8')
if perfmon_header != "":
perfmon_df = pd.DataFrame(perfmon_rows_list)
perfmon_df.dropna(inplace=True)
create_generic_table(connection, "perfmon", perfmon_df)
for row in perfmon_rows_list:
insert_dict_into_table(connection, "perfmon", row)
connection.commit()
if csv_out:
perfmon_output_csv = f"{output_filepath_prefix}vmstat.csv"
# if file does not exist write header
if not os.path.isfile(perfmon_output_csv):
perfmon_df.to_csv(perfmon_output_csv, header='column_names', index=False, encoding='utf-8')
else: # else it exists so append without writing the header
perfmon_df.to_csv(perfmon_output_csv, mode='a', header=False, index=False, encoding='utf-8')
if iostat_header != "":
iostat_df = pd.DataFrame(iostat_rows_list)
iostat_df.dropna(inplace=True)
create_generic_table(connection, "iostat", iostat_df)
for row in iostat_rows_list:
insert_dict_into_table(connection, "iostat", row)
connection.commit()
if csv_out:
iostat_output_csv = f"{output_filepath_prefix}iostat.csv"
# if file does not exist write header
if not os.path.isfile(iostat_output_csv):
iostat_df.to_csv(iostat_output_csv, header='column_names', index=False, encoding='utf-8')
else: # else it exists so append without writing the header
iostat_df.to_csv(iostat_output_csv, mode='a', header=False, index=False, encoding='utf-8')
def create_overview(connection, sp_dict):
    """Create the `overview` key/value table and populate it from *sp_dict*.

    :param connection: open sqlite3 connection
    :param sp_dict: mapping of overview field name -> value
    """
    create_overview_table = """
    CREATE TABLE IF NOT EXISTS overview (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        field TEXT NOT NULL,
        value TEXT
    );
    """
    execute_simple_query(connection, create_overview_table)
    # Insert all fields in one batch (was one execute+commit per key)
    cursor = connection.cursor()
    cursor.executemany(
        "INSERT INTO overview (field, value) VALUES (?, ?)",
        list(sp_dict.items()),
    )
    connection.commit()
def simple_chart(data, column_name, title, max_y, filepath, output_prefix, **kwargs):
    """Render one metric as a static .png scatter-over-time chart.

    :param data: dataframe (may be a view) with 'datetime' and 'metric' columns
    :param column_name: metric name, used for the y label and output file name
    :param title: chart title
    :param max_y: fixed y-axis top, or 0 to autoscale
    :param filepath: output directory (including trailing separator)
    :param output_prefix: prefix for the output file name
    :param kwargs: optional ``file_prefix`` inserted into the file name
    """
    file_prefix = kwargs.get("file_prefix", "")
    if file_prefix != "":
        file_prefix = f"{file_prefix}_"
    # Convert datetime string to datetime type (data is a _view_ of full dataframe, create a copy to update here)
    png_data = data.copy()
    png_data.loc[:, 'datetime'] = pd.to_datetime(data['datetime'], infer_datetime_format=True)
    colormap_name = "Set1"
    plt.style.use('seaborn-whitegrid')
    # Bug fix: the original created a throwaway figure via plt.figure() and
    # then a second one via plt.subplots(); create only the figure we draw on,
    # with the intended size and dpi.
    fig, ax = plt.subplots(figsize=(16, 6), dpi=300)
    palette = plt.get_cmap(colormap_name)
    color = palette(1)
    ax.plot(png_data['datetime'], png_data['metric'], label=column_name, color=color, marker='.', linestyle="none", alpha=0.7)
    ax.grid(which='major', axis='both', linestyle='--')
    ax.set_title(title, fontsize=14)
    ax.set_ylabel(column_name, fontsize=10)
    ax.tick_params(labelsize=10)
    ax.set_ylim(bottom=0)  # Always zero start
    if max_y != 0:
        ax.set_ylim(top=max_y)
    # Pick a tick precision appropriate for the magnitude of the data
    if png_data["metric"].max() > 10 or "%" in column_name:
        ax.yaxis.set_major_formatter(mpl.ticker.StrMethodFormatter("{x:,.0f}"))
    elif png_data["metric"].max() < 0.002:
        ax.yaxis.set_major_formatter(mpl.ticker.StrMethodFormatter("{x:,.4f}"))
    else:
        ax.yaxis.set_major_formatter(mpl.ticker.StrMethodFormatter("{x:,.3f}"))
    locator = plt_dates.AutoDateLocator()
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(plt_dates.AutoDateFormatter(locator=locator, defaultfmt='%H:%M'))
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right")
    plt.tight_layout()
    output_name = column_name.replace("/", "_")
    plt.savefig(f"{filepath}{output_prefix}{file_prefix}z_{output_name}.png", format='png')
    plt.close('all')
def linked_chart(data, column_name, title, max_y, filepath, output_prefix, **kwargs):
    """Save a two-pane linked/zoomable html time chart for one metric.

    The lower pane carries an interval brush whose selection drives the
    x-domain of the upper (zoomed) pane.

    :param data: dataframe with 'datetime', 'Type' and 'metric' columns
    :param column_name: metric name, used for the y label and file name
    :param title: chart title
    :param max_y: y-axis top
    :param filepath: output directory (including trailing separator)
    :param output_prefix: prefix for the output file name
    :param kwargs: optional ``file_prefix`` inserted into the file name
    """
    file_prefix = kwargs.get("file_prefix", "")
    if file_prefix != "":
        file_prefix = f"{file_prefix}_"
    # Interval selection on the x axis only; it links the two panes
    brush = alt.selection(type="interval", encodings=["x"])
    # Create the chart
    base = (
        alt.Chart(data)
        .mark_line()
        .encode(
            alt.X("datetime:T", title="Time"),
            alt.Y("metric", title=column_name, scale=alt.Scale(domain=(0, max_y))),
            alt.Color("Type", title="Metric"),
            tooltip=["metric"],
        )
        .properties(height=500, width=1333, title=title)
    )
    # Upper is zoomed area X axis
    upper = base.encode(alt.X("datetime:T", title="Time Zoom", scale=alt.Scale(domain=brush)))
    # Lower chart binds the brush in our chart by setting the selection property
    lower = base.properties(height=150, title="").add_selection(brush)
    # Bug fix: the configure_* chain used to be applied to a throwaway
    # hconcat whose result was discarded; apply it to the chart we save.
    chart = (upper & lower).configure_title(fontSize=14, color="black").configure_legend(
        strokeColor="gray", fillColor="#EEEEEE", padding=10, cornerRadius=10, orient="right"
    )
    output_name = column_name.replace("/", "_")
    chart.save(f"{filepath}{output_prefix}{file_prefix}{output_name}.html", scale_factor=2.0)
def interactive_chart(data, column_name, title, max_y, filepath, output_prefix, **kwargs):
    """Save a single-pane pan/zoomable html chart for one metric.

    :param data: dataframe with 'datetime', 'Type' and 'metric' columns
    :param column_name: metric name, used for the y label and file name
    :param title: chart title
    :param max_y: y-axis top
    :param filepath: output directory (including trailing separator)
    :param output_prefix: prefix for the output file name
    :param kwargs: optional ``file_prefix`` inserted into the file name
    """
    file_prefix = kwargs.get("file_prefix", "")
    if file_prefix != "":
        file_prefix = f"{file_prefix}_"
    output_name = column_name.replace("/", "_")
    # Build the chart in named steps rather than one long call chain
    chart = alt.Chart(data).mark_line().encode(
        alt.X("datetime:T", title="Time"),
        alt.Y("metric", title=column_name, scale=alt.Scale(domain=(0, max_y))),
        alt.Color("Type", title="Metric"),
        tooltip=["metric"],
    )
    chart = chart.properties(height=500, width=1333, title=title).interactive()
    chart.save(f"{filepath}{output_prefix}{file_prefix}int_{output_name}.html", scale_factor=2.0)
def linked_chart_no_time(data, column_name, title, max_y, filepath, output_prefix, **kwargs):
    """Save a two-pane linked/zoomable html chart keyed by row count (no time axis).

    Same as linked_chart() but the x axis is the ``id_key`` row counter
    instead of a timestamp.

    :param data: dataframe with 'id_key', 'Type' and 'metric' columns
    :param column_name: metric name, used for the y label and file name
    :param title: chart title
    :param max_y: y-axis top
    :param filepath: output directory (including trailing separator)
    :param output_prefix: prefix for the output file name
    :param kwargs: optional ``file_prefix`` inserted into the file name
    """
    file_prefix = kwargs.get("file_prefix", "")
    if file_prefix != "":
        file_prefix = f"{file_prefix}_"
    brush = alt.selection(type="interval", encodings=["x"])
    # Create the chart
    base = (
        alt.Chart(data)
        .mark_line()
        .encode(
            alt.X("id_key:Q", title="Count"),
            alt.Y("metric", title=column_name, scale=alt.Scale(domain=(0, max_y))),
            alt.Color("Type", title="Metric"),
            tooltip=["metric:N"],
        )
        .properties(height=500, width=1333, title=title)
    )
    # Upper is zoomed area X axis
    upper = base.encode(alt.X("id_key:Q", title="Count Zoom", scale=alt.Scale(domain=brush)))
    # Lower chart binds the brush in our chart by setting the selection property
    lower = base.properties(height=150, title="").add_selection(brush)
    # Bug fix: the configure_* chain used to be applied to a throwaway
    # hconcat whose result was discarded; apply it to the chart we save.
    chart = (upper & lower).configure_title(fontSize=14, color="black").configure_legend(
        strokeColor="gray", fillColor="#EEEEEE", padding=10, cornerRadius=10, orient="right"
    )
    output_name = column_name.replace("/", "_")
    chart.save(f"{filepath}{output_prefix}{file_prefix}{output_name}.html", scale_factor=2.0)
def chart_vmstat(connection, filepath, output_prefix, png_out):
    """Chart every vmstat column, as .png files (png_out) or linked html charts.

    :param connection: open sqlite3 connection holding vmstat/overview tables
    :param filepath: output directory (including trailing separator)
    :param output_prefix: prefix for output file names
    :param png_out: True for static pngs, False for linked html charts
    """
    # Site/host details used in chart titles
    customer = execute_single_read_query(connection, "SELECT * FROM overview WHERE field = 'customer';")[2]
    number_cpus = execute_single_read_query(connection, "SELECT * FROM overview WHERE field = 'number cpus';")[2]
    processor = execute_single_read_query(connection, "SELECT * FROM overview WHERE field = 'processor model';")[2]
    df = pd.read_sql_query("SELECT * FROM vmstat", connection)
    # vmstat "id" is the idle column, so Total CPU = 100 - idle;
    # build a combined datetime string column for the x axis
    df["Total CPU"] = 100 - df["id"]
    df["datetime"] = df["Date"] + " " + df["Time"]
    # Keep only the columns worth charting
    unwanted_columns = ["id_key", "Date", "Time", "html name"]
    columns_to_chart = [col for col in df.columns if col not in unwanted_columns]
    # Unpivot: one row per (datetime, Type, metric) for Altair-style plotting
    vmstat_df = df[columns_to_chart].melt("datetime", var_name="Type", value_name="metric")
    for column_name in columns_to_chart:
        if column_name == "datetime":
            continue
        title = f"{column_name} - {customer}"
        if column_name in ("Total CPU", "r"):
            # CPU-related charts also show the core count and model
            title += f"\n{number_cpus} cores ({processor})"
        data = vmstat_df.loc[vmstat_df["Type"] == column_name]
        if column_name in ("Total CPU", "wa", "id", "us", "sy"):
            # percentage columns get a fixed 0-100 axis
            max_y = 100
        else:
            max_y = data["metric"].max()
        if png_out:
            simple_chart(data, column_name, title, max_y, filepath, output_prefix)
        else:
            linked_chart(data, column_name, title, max_y, filepath, output_prefix)
def chart_mgstat(connection, filepath, output_prefix, png_out):
# print(f"mgstat...")
customer = execute_single_read_query(connection, "SELECT * FROM overview WHERE field = 'customer';")[2]
# Read in to dataframe
df = pd.read_sql_query("SELECT | |
kk != 'lamb') or kk == 't')
and vv.shape[0] > indt.size)
if c0:
dins[kk] = vv[indt, ...]
# dlabels
dins['dlabels'] = dict.fromkeys(lk)
for kk in lk:
dins['dlabels'][kk] = {'name': dsig[kk],
'units': out[dsig[kk]]['units']}
# dextra
dextra = self._get_dextra(dextra, fordata=True)
# t0
if indt0 is None:
indt0 = 0
t0 = self._get_t0(t0, ind=indt0)
if t0 != False:
if 't' in dins.keys():
dins['t'] = dins['t'] - t0
if dextra is not None:
for tt in dextra.keys():
dextra[tt]['t'] = dextra[tt]['t'] - t0
# --------------
# Create objects
if geomcls is not False and dgeom is not None:
import tofu.geom as tfg
cam = getattr(tfg, geomcls)(dgeom=dgeom, config=config,
Etendues=Etendues, Surfaces=Surfaces,
Name=Name, Diag=ids, Exp=Exp,
dchans=dchans)
cam.Id.set_dUSR({'imas-nchMax': nchMax})
import tofu.data as tfd
conf = None if cam is not None else config
Data = getattr(tfd, datacls)(Name=Name, Diag=ids, Exp=Exp, shot=shot,
lCam=cam, config=conf, dextra=dextra,
dchans=dchans, **dins)
Data.Id.set_dUSR( {'imas-nchMax': nchMax} )
if plot:
Data.plot(draw=True, bck=bck)
if return_indch is True:
return Data, indch
else:
return Data
    def calc_signal(self, ids=None, dsig=None, tlim=None, t=None, res=None,
                    quant=None, ref1d=None, ref2d=None,
                    q2dR=None, q2dPhi=None, q2dZ=None,
                    Brightness=None, interp_t=None, newcalc=True,
                    indch=None, indch_auto=False, Name=None, coefs=None,
                    occ_cam=None, occ_plasma=None, check_units=None,
                    config=None, description_2d=None, indevent=None,
                    dextra=None, t0=None, datacls=None, geomcls=None,
                    bck=True, fallback_X=None, nan=True, pos=None,
                    plot=True, plot_compare=None, plot_plasma=None):
        """ Compute synthetic data for a diagnostics and export as DataCam1D

        Some ids typically contain plasma 1d (radial) or 2d (mesh) profiles
        They include for example ids:
            - core_profiles
            - core_sources
            - edge_profiles
            - edge_sources
            - equilibrium

        From these profiles, tofu can compute synthetic data for a diagnostic
        ids which provides a geometry (channels.line_of_sight).
        tofu extracts the geometry, and integrates the desired profile along
        the lines of sight (LOS), using 2D interpolation when necessary

        It requires:
            - a diagnostic ids with geometry (LOS)
            - an ids containing the 1d or 2d profile to be integrated
            - if necessary, an intermediate ids to interpolate the 1d profile
              to 2d (e.g.: equilibrium)

        For each ids, you need to specify:
            - profile ids:
                profile (signal) to be integrated
                quantity to be used for 1d interpolation
            - equilibrium / intermediate ids:
                quantity to be used for 2d interpolation
                (shall be the same dimension as quantity for 1d interp.)

        This method is a combination of self.to_Plasma2D() (used for
        extracting profiles and equilibrium and for interpolation) and
        self.to_Cam() (used for extracting diagnostic geometry) and to_Data()
        (used for exporting the computed result as a tofu DataCam1D instance).

        Args ids, dsig, tlim, occ_plasma (occ), nan, pos, plot_plasma (plot)
        are fed to to_Plasma2D()
        Args indch, indch_auto, occ_cam (occ), config, description_2d, are fed
        to to_Cam()
        Args Name, bck, fallback_X, plot, t0, dextra are fed to to_Data()

        Parameters
        ----------
        t: None / float / np.ndarray
            time at which the synthetic signal shall be computed
            If None, computed for all available time steps
        res: None / float
            absolute spatial resolution (sampling steps) used for
            Line-of-Sight integration (in meters)
        quant: None / str
            Shortcut of the quantity to be integrated
        ref1d: None / str
            Shortcut of the quantity to be used as reference for 1d
            interpolation
        ref2d: None / str
            Shortcut of the quantity to be used as reference for 2d
            interpolation
        q2dR: None / str
            If integrating an anisotropic vector field (e.g. magnetic field)
            q2dR is the shortcut of the R-component of the quantity
        q2dPhi: None / str
            If integrating an anisotropic vector field (e.g. magnetic field)
            q2dPhi is the shortcut of the Phi-component of the quantity
        q2dZ: None / str
            If integrating an anisotropic vector field (e.g. magnetic field)
            q2dZ is the shortcut of the Z-component of the quantity
        Brightness: bool
            Flag indicating whether the result shall be returned as a
            Brightness (i.e.: line integral) or an incident flux (Brightness x
            Etendue), which requires the Etendue
        plot_compare: bool
            Flag indicating whether to plot the experimental data against the
            computed synthetic data

        Return
        ------
        sig: DataCam1D
            DataCam1D instance
        """
        # Check / format inputs
        if check_units is None:
            check_units = True
        if plot is None:
            plot = True
        if plot:
            if plot_compare is None:
                plot_compare = True
            if plot_plasma is None:
                plot_plasma = True
        # Get experimental data first if relevant
        # to get correct indch for comparison
        if plot and plot_compare:
            data, indch = self.to_Data(ids, indch=indch,
                                       indch_auto=indch_auto, t0=t0,
                                       config=config, tlim=tlim,
                                       indevent=indevent,
                                       description_2d=description_2d,
                                       return_indch=True, plot=False)
        # Get camera
        cam = self.to_Cam(ids=ids, indch=indch, indch_auto=indch_auto,
                          Name=None, occ=occ_cam,
                          config=config, description_2d=description_2d,
                          plot=False, nan=True, pos=None)
        # Get relevant parameters
        dsig, dq, lq = _comp_toobjects.signal_get_synth(
            ids, dsig,
            quant, ref1d, ref2d, q2dR, q2dPhi, q2dZ,
            didsdiag=self._didsdiag, lidsplasma=self._lidsplasma,
            dshort=self._dshort, dcomp=self._dcomp)
        # Get relevant plasma
        plasma = self.to_Plasma2D(tlim=tlim, indevent=indevent,
                                  dsig=dsig, t0=t0,
                                  Name=None, occ=occ_plasma,
                                  config=cam.config, out=object,
                                  plot=False, dextra=dextra,
                                  nan=True, pos=None)
        # Intermediate computation if necessary
        ani = False
        if ids == 'bremsstrahlung_visible':
            try:
                lamb = self.get_data(dsig={ids: 'lamb'},
                                     stack=True)[ids]['lamb']['data']
            except Exception as err:
                # NOTE(review): the fallback value is 5238.e-10 but the
                # warning text says 5338.e-10 - confirm which is intended
                lamb = 5238.e-10
                msg = "bremsstrahlung_visible.lamb could not be retrived!\n"
                msg += " => fallback to lamb = 5338.e-10 m (WEST case)"
                warnings.warn(msg)
            out = plasma.compute_bremzeff(Te='core_profiles.1dTe',
                                          ne='core_profiles.1dne',
                                          zeff='core_profiles.1dzeff',
                                          lamb=lamb)
            quant, _, units = out
            origin = 'f(core_profiles, bremsstrahlung_visible)'
            depend = ('core_profiles.t','core_profiles.1dTe')
            plasma.add_quantity(key='core_profiles.1dbrem', data=quant,
                                depend=depend, origin=origin, units=units,
                                dim=None, quant=None, name=None)
            dq['quant'] = ['core_profiles.1dbrem']
        elif ids == 'polarimeter':
            lamb = self.get_data(dsig={ids: 'lamb'},
                                 stack=True)[ids]['lamb']['data'][0]
            # Get time reference
            doutt, dtut, tref = plasma.get_time_common(lq)
            if t is None:
                t = tref
            # Add necessary 2dne (and time reference)
            ne2d, tne2d = plasma.interp_pts2profile(quant='core_profiles.1dne',
                                                    ref1d='core_profiles.1drhotn',
                                                    ref2d='equilibrium.2drhotn',
                                                    t=t, interp_t='nearest')
            # Add fanglev
            out = plasma.compute_fanglev(BR='equilibrium.2dBR',
                                         BPhi='equilibrium.2dBT',
                                         BZ='equilibrium.2dBZ',
                                         ne=ne2d, tne=tne2d, lamb=lamb)
            fangleRPZ, tfang, units = out
            plasma.add_ref(key='tfangleRPZ', data=tfang, group='time')
            origin = 'f(equilibrium, core_profiles, polarimeter)'
            depend = ('tfangleRPZ','equilibrium.mesh')
            plasma.add_quantity(key='2dfangleR', data=fangleRPZ[0,...],
                                depend=depend, origin=origin, units=units,
                                dim=None, quant=None, name=None)
            plasma.add_quantity(key='2dfanglePhi', data=fangleRPZ[1,...],
                                depend=depend, origin=origin, units=units,
                                dim=None, quant=None, name=None)
            plasma.add_quantity(key='2dfangleZ', data=fangleRPZ[2,...],
                                depend=depend, origin=origin, units=units,
                                dim=None, quant=None, name=None)
            dq['q2dR'] = ['2dfangleR']
            dq['q2dPhi'] = ['2dfanglePhi']
            dq['q2dZ'] = ['2dfangleZ']
            dq['Type'] = ['sca']
            ani = True
        # Each dq entry must be None or a 1-element list of str; unwrap lists
        for kk,vv in dq.items():
            c0 = [vv is None,
                  type(vv) is list and len(vv) == 1 and type(vv[0]) is str]
            if not any(c0):
                msg = "All in dq must be None or list of 1 string !\n"
                msg += " - Provided: dq[%s] = %s"%(kk,str(vv))
                raise Exception(msg)
            if vv is not None:
                dq[kk] = vv[0]
        # Check units of integrated field
        if check_units is True:
            if 'quant' in dq.keys():
                units_input = plasma._ddata[dq['quant']]['units']
            else:
                units_input = plasma._ddata[dq['q2dR']]['units']
            # emission units without per-steradian need the 1/(4pi) factor
            if any([ss in units_input for ss in ['W', 'ph', 'photons']]):
                if 'sr^-1' not in units_input:
                    dq['coefs'] = 1./(4.*np.pi)
        if ids == 'interferometer':
            # For interferometers, the data corresponds to 2 laser passages
            dq['coefs'] = 2.
        if ids == 'polarimeter':
            # For polarimeter, the vect is along the LOS
            # (original comment truncated; presumably "...not the direction
            # of propagation" - hence the negative sign)
            dq['coefs'] = -2.
        if coefs is not None:
            dq['coefs'] = dq.get('coefs', 1.)*coefs
        # Calculate synthetic signal
        if Brightness is None:
            Brightness = self._didsdiag[ids]['synth'].get('Brightness', None)
        dq['fill_value'] = 0.
        sig, units = cam.calc_signal_from_Plasma2D(plasma, res=res, t=t,
                                                   Brightness=Brightness,
                                                   newcalc=newcalc,
                                                   plot=False, **dq)
        sig._dextra = plasma.get_dextra(dextra)
        # Safety check regarding Brightness
        _, _, dsig_exp = _comp_toobjects.data_checkformat_dsig(
            ids, dsig=None, data=None, X=None,
            datacls=None, geomcls=None,
            lidsdiag=self._lidsdiag, dids=self._dids, didsdiag=self._didsdiag,
            dshort=self._dshort, dcomp=self._dcomp)
        kdata = dsig_exp['data']
        B_exp = self._dshort[ids][kdata].get('Brightness', None)
        err_comp = False
        if Brightness != B_exp:
            u_exp = self._dshort[ids][kdata].get('units')
            msg = ("\nCalculated synthetic and chosen experimental data "
                   + "do not seem directly comparable !\n"
                   + "\t- chosen experimental data: "
                   + "{}, ({}), Brightness = {}\n".format(kdata,
                                                          u_exp, B_exp)
                   + "\t- calculated synthetic data: "
                   + "int({}), ({}), Brightness = {}\n".format(dq['quant'],
                                                               units,
                                                               Brightness)
                   + "\n => Consider changing data or Brigthness value")
            err_comp = True
            warnings.warn(msg)
        # plot
        if plot:
            if plot_compare:
                # comparison with mismatched Brightness would be misleading
                if err_comp:
                    raise Exception(msg)
                sig._dlabels = data.dlabels
                data.plot_compare(sig, bck=bck)
            else:
                sig.plot(bck=bck)
            c0 = (plot_plasma
                  and dq.get('quant') is not None and '1d' in dq['quant'])
            if c0 is True:
                plasma.plot(dq['quant'], X=dq['ref1d'], bck=bck)
        return sig
#############################################################
#############################################################
# Function-oriented interfaces to IdsMultiLoader
#############################################################
def load_Config(shot=None, run=None, user=None, database=None, version=None,
                Name=None, occ=0, description_2d=None, plot=True):
    """Load the wall ids for a shot and return it as a tofu Config."""
    loader = MultiIDSLoader()
    loader.add_idd(shot=shot, run=run, user=user,
                   database=database, version=version)
    loader.add_ids('wall', get=True)
    return loader.to_Config(Name=Name, occ=occ,
                            description_2d=description_2d, plot=plot)
# TODO: confirm how occ (occurrence) should be handled/exposed here
def load_Plasma2D(shot=None, run=None, user=None, database=None, version=None,
tlim=None, occ=None, dsig=None, ids=None,
config=None, description_2d=None,
Name=None, t0=None, out=object, dextra=None,
plot=None, plot_sig=None, plot_X=None, bck=True):
didd = MultiIDSLoader()
didd.add_idd(shot=shot, run=run,
user=user, database=database, version=version)
if dsig | |
# output the row that lists the total transfers (current and pending) row
def xfer_row(self, lm):
tr = HTMLgen.TR()
tr.append(HTMLgen.TD(HTMLgen.Font("Ongoing%sTransfers"%(NBSP,),
color=BRICKRED, html_escape='OFF')))
tr.append(HTMLgen.TD(self.data_dict[lm][enstore_constants.TOTALONXFERS]))
tr.append(HTMLgen.TD(HTMLgen.Font("Pending%sTransfers"%(NBSP,),
color=BRICKRED, html_escape='OFF')))
tr.append(HTMLgen.TD(self.data_dict[lm][enstore_constants.TOTALPXFERS]))
# now add a link to the page with the full queue elements
tr.append(HTMLgen.TD(HTMLgen.Href(get_full_queue_name(lm),
"Full%sQueue%sElements"%(NBSP,NBSP)),
html_escape='OFF'))
return tr
def make_lm_wam_queue_rows(self, qelem, cols):
if qelem[enstore_constants.WORK] == enstore_constants.WRITE:
r_type = "Writing%s"%(NBSP,)
else:
r_type = "Reading%s"%(NBSP,)
vol = HTMLgen.Href("tape_inventory/%s"%(qelem[enstore_constants.DEVICE]),
qelem[enstore_constants.DEVICE])
mover = HTMLgen.Href("%s#%s"%(enstore_functions2.get_mover_status_filename(),
qelem[enstore_constants.MOVER]),
qelem[enstore_constants.MOVER])
ff = qelem[enstore_constants.FILE_FAMILY]
sg = qelem[enstore_constants.STORAGE_GROUP]
txt = "%s%s(%s%s%s)%susing%s%s%sfrom%s%s%sby%s%s"%(r_type, str(vol), sg, NBSP,ff, NBSP, NBSP,
str(mover), NBSP, NBSP,
enstore_functions2.strip_node(qelem[enstore_constants.NODE]),
NBSP, NBSP,
qelem[enstore_constants.USERNAME])
return HTMLgen.TR(HTMLgen.TD(txt, colspan=cols, html_escape='OFF'))
def work_at_movers_row(self, lm, cols):
the_work = self.data_dict[lm].get(enstore_constants.WORK,
enstore_constants.NO_WORK)
rows = []
if not the_work == enstore_constants.NO_WORK:
qlen = len(the_work)
max_lm_rows = self.max_lm_rows.get(lm, DEFAULT_THRESHOLDS)[0]
if max_lm_rows == DEFAULT_ALL_ROWS or not qlen > max_lm_rows:
rows_on_page = qlen
extra_rows = 0
else:
rows_on_page = max_lm_rows
extra_rows = qlen - max_lm_rows
for qelem in the_work[0:rows_on_page]:
rows.append(self.make_lm_wam_queue_rows(qelem, cols))
if extra_rows > 0:
# we will need to cut short the number of queue elements that
# we output on the main status page, and add a link to point
# to the rest that will be on another page. however, there is
# only one other page at this time
filename = "%s_%s.html"%(lm, enstore_constants.WORK)
rows.append(HTMLgen.TR(HTMLgen.TD(HTMLgen.Href(filename,
'Extra Queue Rows (%s)'%(extra_rows,)),
colspan=cols)))
qlen = max_lm_rows
new_key = "%s-%s"%(lm, enstore_constants.WORK)
self.extra_queue_pages[new_key] = (EnExtraLmQueuePages(self, lm),
filename)
rows = []
for elem in the_work[qlen:]:
rows.append(self.make_lm_wam_queue_rows(elem, cols))
self.extra_queue_pages[new_key][0].body(rows)
return rows
def make_lm_pend_read_row(self, qelem, cols):
r_type = "Pending%sread%sof%s"%(NBSP, NBSP, NBSP)
vol = HTMLgen.Href("tape_inventory/%s"%(qelem[enstore_constants.DEVICE]),
qelem[enstore_constants.DEVICE])
txt = "%s%s%sfrom%s%s%sby%s%s%s[%s]"%(r_type, str(vol), NBSP, NBSP,
enstore_functions2.strip_node(qelem[enstore_constants.NODE]),
NBSP, NBSP, qelem[enstore_constants.USERNAME], NBSP,
qelem.get(enstore_constants.REJECT_REASON, ""))
return HTMLgen.TR(HTMLgen.TD(txt, colspan=cols, html_escape='OFF'))
def make_lm_pend_write_row(self, qelem, cols):
r_type = "Pending%swrite%sfor%s%s%s%s"%(NBSP, NBSP, NBSP,
qelem[enstore_constants.STORAGE_GROUP], NBSP,
qelem[enstore_constants.FILE_FAMILY])
##r_type = "Pending%swrite%sfor%s%s"%(NBSP, NBSP, NBSP,
## qelem[enstore_constants.FILE_FAMILY])
txt = "%s%sfrom%s%s%sby%s%s%s[%s]"%(r_type, NBSP, NBSP,
enstore_functions2.strip_node(qelem[enstore_constants.NODE]),
NBSP, NBSP, qelem[enstore_constants.USERNAME], NBSP,
qelem.get(enstore_constants.REJECT_REASON, ""))
return HTMLgen.TR(HTMLgen.TD(txt, colspan=cols, html_escape='OFF'))
    def pending_work_row(self, lm, cols):
        """Build rows for the pending queues (reads first, then writes) of
        one library manager.

        Rows beyond the per-library-manager limit are moved to a separate
        "extra queue" page, which is linked from the last main-page row.
        Returns the list of rows for the main status page.
        """
        the_work = self.data_dict[lm].get(enstore_constants.PENDING, {})
        rows = []
        extra_read_rows = []
        extra_write_rows = []
        filename = "%s_%s.html"%(lm, enstore_constants.PENDING)
        max_lm_rows = self.max_lm_rows.get(lm, DEFAULT_THRESHOLDS)[0]
        qlen = 0
        # do the read queue first
        if the_work and type(the_work) == types.DictionaryType:
            if not the_work[enstore_constants.READ] == []:
                qlen = len(the_work[enstore_constants.READ])
                # DEFAULT_ALL_ROWS means no limit on main-page rows
                if max_lm_rows == DEFAULT_ALL_ROWS or qlen <= max_lm_rows:
                    rows_on_page = qlen
                    extra_rows = 0
                else:
                    rows_on_page = max_lm_rows
                    extra_rows = qlen - max_lm_rows
                for qelem in the_work[enstore_constants.READ][0:rows_on_page]:
                    rows.append(self.make_lm_pend_read_row(qelem, cols))
                if extra_rows > 0:
                    # overflow reads are collected for the extra page
                    for qelem in the_work[enstore_constants.READ][rows_on_page:]:
                        extra_read_rows.append(self.make_lm_pend_read_row(qelem,
                                                                          cols))
            if not the_work[enstore_constants.WRITE] == []:
                qlen = len(the_work[enstore_constants.WRITE])
                if max_lm_rows == DEFAULT_ALL_ROWS or not qlen > max_lm_rows:
                    rows_on_page = qlen
                    extra_rows = 0
                else:
                    rows_on_page = max_lm_rows
                    extra_rows = qlen - max_lm_rows
                for qelem in the_work[enstore_constants.WRITE][0:rows_on_page]:
                    rows.append(self.make_lm_pend_write_row(qelem, cols))
                if extra_rows > 0:
                    # overflow writes are collected for the extra page
                    for qelem in the_work[enstore_constants.WRITE][rows_on_page:]:
                        extra_write_rows.append(self.make_lm_pend_write_row(qelem,
                                                                            cols))
        extra_rows = extra_read_rows + extra_write_rows
        if extra_rows:
            # we will need to cut short the number of queue elements that we
            # output on the main status page, and add a link to point to the
            # rest that will be on another page. however, there is only one
            # other page at this time
            rows.append(HTMLgen.TR(HTMLgen.TD(HTMLgen.Href(filename,
                                     'Extra Queue Rows (%s)'%(len(extra_rows),)),
                                              colspan=cols)))
            new_key = "%s-%s"%(lm, enstore_constants.PENDING)
            self.extra_queue_pages[new_key] = (EnExtraLmQueuePages(self, lm),
                                               filename)
            self.extra_queue_pages[new_key][0].body(extra_rows)
        return rows
    # output the information for a library manager
    def lm_rows(self, lm):
        """Build all status-page rows for one library manager: the alive row
        (with the lm state appended and bad states highlighted), followed by
        a nested table holding transfer totals and the work/pending queues."""
        table_rows = []
        cols = 5
        # first the alive information
        lm_d = self.data_dict.get(lm, {})
        # if we are updating the web page faster that receiving the new
        # info, then we already have a correct status
        if lm_d and string.find(lm_d[enstore_constants.STATUS][0], NBSP) == -1:
            if lm_d.has_key(enstore_constants.LMSTATE) and \
               lm_d[enstore_constants.STATUS][0] not in NO_INFO_STATES:
                # append the lm state to the status information
                lm_d[enstore_constants.STATUS][0] = \
                    "%s%s:%s%s"%(lm_d[enstore_constants.STATUS][0], NBSP, NBSP,
                                 lm_d[enstore_constants.LMSTATE])
        # get the first word of the lm state, we will use this to
        # tell if this is a bad state or not
        if lm_d.has_key(enstore_constants.LMSTATE) and \
           type(lm_d[enstore_constants.LMSTATE]) == types.StringType:
            words = string.split(lm_d[enstore_constants.LMSTATE])
        else:
            words = ["",]
        name = self.server_url(lm, "%s.html"%(lm,))
        # anchor so the page can link directly to this library manager
        table_rows.append(HTMLgen.TR(HTMLgen.TD(HTMLgen.NAME(lm))))
        if words[0] in BAD_LM_STATES:
            # highlight bad states in FUSCHIA
            table_rows.append(self.alive_row(lm, lm_d[enstore_constants.STATUS],
                                             FUSCHIA, link=lm))
        else:
            table_rows.append(self.alive_row(lm, lm_d[enstore_constants.STATUS],
                                             link = name))
        # we may have gotten an error while trying to get the info,
        # so check for a piece of it first
        if lm_d.has_key(enstore_constants.LMSTATE):
            # the rest of the lm information is in a separate table, it starts
            # with the suspect volume info
            lm_table = HTMLgen.TableLite(cellpadding=0,
                                         cellspacing=0, align="LEFT",
                                         bgcolor=YELLOW, width="100%")
            lm_table.append(self.xfer_row(lm))
            lm_table.append(self.null_row(cols))
            lm_table.append(empty_row(cols))
            rows = self.work_at_movers_row(lm, cols)
            for row in rows:
                lm_table.append(row)
            rows = self.pending_work_row(lm, cols)
            for row in rows:
                lm_table.append(row)
            tr = HTMLgen.TR(empty_data())
            tr.append(HTMLgen.TD(lm_table, colspan=cols))
            table_rows.append(tr)
        return table_rows
# output all of the library manager rows and their associated movers
def library_manager_rows(self, table, skeys):
for server in skeys:
if enstore_functions2.is_library_manager(server):
# this is a library manager. output all of its info
rows = self.lm_rows(server)
if self.not_being_monitored(server):
aList = self.unmonitored_servers
else:
aList = table
for row in rows:
aList.append(row)
    def mover_row(self, server):
        # this is a mover. output its info
        # Returns a single alive/status row for the mover; fuchsia when the
        # first word of its state is in BAD_MOVER_STATES.
        # NOTE(review): if `server` is absent from data_dict, mover_d is {}
        # and the final branch indexes mover_d[STATUS] -> KeyError;
        # presumably callers only pass servers present in data_dict — confirm.
        mover_d = self.data_dict.get(server, {})
        name = self.server_url(server, enstore_functions2.get_mover_status_filename(),
                               server)
        if mover_d.has_key(enstore_constants.STATE) and \
           mover_d[enstore_constants.STATE]:
            # append the movers state to its status information
            # if we are updating the web page faster that receiving the new
            # info, then we already have a correct status
            # (an NBSP in STATUS[0] marks a previously appended state)
            if string.find(mover_d[enstore_constants.STATUS][0], NBSP) == -1 and \
               mover_d[enstore_constants.STATUS][0] not in NO_INFO_STATES:
                mover_d[enstore_constants.STATUS][0] = \
                    "%s%s:%s%s"%(mover_d[enstore_constants.STATUS][0],
                                 NBSP, NBSP,
                                 mover_d[enstore_constants.STATE])
            # get the first word of the mover state, we will use this
            # to tell if this is a bad state or not
            words = string.split(mover_d[enstore_constants.STATE])
            if words[0] in BAD_MOVER_STATES:
                # highlight problem movers
                return self.alive_row(server,
                                      mover_d[enstore_constants.STATUS],
                                      FUSCHIA, link=name)
            else:
                return self.alive_row(server,
                                      mover_d[enstore_constants.STATUS],
                                      link=name)
        else:
            # no state information: plain alive row
            return self.alive_row(server,
                                  mover_d[enstore_constants.STATUS],
                                  link=name)
# output all of the mover rows
def mover_rows(self, table, skeys):
for server in skeys:
if enstore_functions2.is_mover(server):
if self.not_being_monitored(server):
self.unmonitored_servers.append(self.mover_row(server))
else:
table.append(HTMLgen.TR(HTMLgen.TD(HTMLgen.NAME(server))))
table.append(self.mover_row(server))
    def migrator_row(self, server):
        # this is a migrator. output its info
        # Builds the alive/status row for a migrator plus one extra row per
        # per-key STATE found in its data dict. Returns early (fuchsia row,
        # no detail rows) as soon as any key's state looks bad.
        m_d = self.data_dict.get(server, {})
        name = self.server_url(server, enstore_functions2.get_migrator_status_filename(),
                               server)
        rows = []
        for k in m_d:
            if isinstance(m_d[k], dict) and m_d[k].has_key(enstore_constants.STATE) and \
               m_d[k][enstore_constants.STATE]:
                # append the migrators state to its status information
                # if we are updating the web page faster that receiving the new
                # info, then we already have a correct status
                # NOTE(review): this checks m_d[STATUS] (top level) while the
                # state rows use m_d[k][STATE] (per key) — looks deliberate
                # (one status, many sub-states) but verify against the
                # migrator's data layout.
                if string.find(m_d[enstore_constants.STATUS][0], NBSP) == -1 and \
                   m_d[enstore_constants.STATUS][0] not in NO_INFO_STATES:
                    tr = HTMLgen.TR()
                    tr.append(HTMLgen.TD(m_d[k][enstore_constants.STATE],
                                         align="LEFT", colspan=3, size="-1"))
                    rows.append(tr)
                # get the first word of the migrator state, we will use this
                # to tell if this is a bad state or not
                words = string.split(m_d[k][enstore_constants.STATE])
                if words[0] in BAD_MIGRATOR_STATES:
                    # bad state anywhere: return a highlighted row immediately
                    return self.alive_row(server,
                                          m_d[enstore_constants.STATUS],
                                          FUSCHIA, link=name)
        # normal case: alive row followed by the accumulated state rows
        r = self.alive_row(server,
                           m_d[enstore_constants.STATUS],
                           link=name)
        r.append(empty_data())
        for row in rows:
            r.append(row)
        return r
# output all of the migrator rows
def migrator_rows(self, table, skeys):
for server in skeys:
if enstore_functions2.is_migrator(server):
m_row = self.migrator_row(server)
if self.not_being_monitored(server):
self.unmonitored_servers.append(m_row)
else:
table.append(HTMLgen.TR(HTMLgen.TD(HTMLgen.NAME(server))))
table.append(m_row)
def unmonitored_server_rows(self, table):
for row in self.unmonitored_servers:
table.append(row)
# generate the main table with all of the information
def main_table(self):
# first create the table headings for each column
tr = HTMLgen.TR()
for hdr in HEADINGS:
tr.append(self.make_th(hdr))
table = HTMLgen.TableLite(tr, align="CENTER", cellpadding=0,
cellspacing=0, bgcolor=AQUA, width="100%")
skeys = sort_keys(self.data_dict)
self.unmonitored_servers = []
self.generic_server_rows(table)
self.media_changer_rows(table, skeys)
self.udp_proxy_server_rows(table, skeys)
self.library_manager_rows(table, skeys)
self.mover_rows(table, skeys)
self.migrator_rows(table, skeys)
self.unmonitored_server_rows(table)
return table
# generate the body of the file
def body(self, data_dict):
# this is the data we will output
self.data_dict = data_dict
# create the outer table and its rows
table = self.table_top()
table.append(HTMLgen.TR(HTMLgen.TD(self.shortcut_table())))
table.append(empty_row())
table.append(empty_row())
table.append(HTMLgen.TR(HTMLgen.TD(self.main_table())))
self.trailer(table)
self.append(table)
class EnEncpStatusPage(EnBaseHtmlDoc):
    # Mapping of human-readable error description -> text fragment matched
    # in encp output.
    # NOTE(review): this dict literal contains duplicate keys; in Python the
    # last occurrence wins, so the "EACCES" and "Duplicated entry" value
    # mappings below are silently discarded — confirm whether the keys were
    # meant to be unique (e.g. distinct descriptions per fragment).
    error_text = {"USER ERROR, No Such File" : "No such file",
                  "USER ERROR, No Such Directory" : "No such directory",
                  "USER ERROR, Last field not a directory" : "Not a directory",
                  "USER ERROR, No Read Access" : "EACCES",
                  "USER ERROR, No Read Access" : "Cannot read file",
                  "USER ERROR, No Write Access" : "No write access",
                  "TAPE ERROR, No access" : "NOACCESS",
                  "USER ERROR, need at least 1 /pnfs/... path": "copying unixfile to unixfile",
                  "USER ERROR, Duplicated file in list" : "Duplicated entry",
                  "USER ERROR, Duplicated file in list" : "Duplicate entry",
                  "USER ERROR, Duplicate request, ignored" : "INPROGRESS",
                  "USER ERROR, File already exists" : "EEXIST",
                  "USER ERROR, Control-C'd connection" : "ENCP_GONE",
                  "TAPE ERROR, Admin marked tape as unavailable" : "NOTALLOWED",
                  "TAPE ERROR, At least 2 IO errors on vol, pending request cancelled": "STATUS=N O",
                  "HARDWARE FAILURE, Drive didn't go online, retrying" : "BADSWMOUNT",
                  "HARDWARE FAILURE, AML/2 robot failed" : "BADMOUNT",
                  "HARDWARE FAILURE, Read Error" : "READ_ERROR",
                  "HARDWARE FAILURE, Write Error" : "WRITE_ERROR",
                  "USER ERROR, No Local Disk Space" : "ENOSPC",
                  "LIBRARY MANAGER LOCKED" : "locked for external access"
                  }
    # too general "USER ERROR" : "USERERROR",
    def __init__(self, refresh=120, system_tag=""):
        """Configure the encp-history page on top of the base HTML doc.

        refresh    : auto-refresh interval (seconds) passed to the base doc
        system_tag : tag string passed through to the base doc
        """
        EnBaseHtmlDoc.__init__(self, refresh=refresh,
                               help_file="encpHelp.html",
                               system_tag=system_tag)
        self.align = NO
        self.title = "ENSTORE Encp History"
        self.script_title_gif = "encph.gif"
        self.source_server = THE_INQUISITOR
        self.description = ""
        # Snapshot of error_text keys (a list under Python 2); presumably
        # scanned when classifying encp error output — verify at use site.
        self.error_keys = self.error_text.keys()
# create the body of the page. the data is a list of lists. each outer
# list element is a list of the encp data | |
"""Classes wrapping :class:`~boto3.resources.base.ServiceResource` objects.
The methods and arguments on these classes sometimes differ in name from those in Boto3's
Resources to make them easier to understand in this context.
Glossary:
service_name:
The snake_case name of an AWS service (e.g. ``ec2``)
resource_type:
        The (unpluralised) snake_case name of the type of the resource (e.g. ``instance``)
secondary_attribute:
An attribute of a resource which requires a secondary API call to retrieve (e.g. VPC Attributes)
subresource:
A child resource which does not have its own ARN and must be queried by referencing
its parent resource's ID (e.g. inline role policies). Different from Boto3 subresources which simply
indicate a hierarchical relationship between resources (e.g. subnets are a child resource of vpcs).
"""
import logging
from typing import Any, Dict, Generator, List, NamedTuple, Optional, Tuple, Type
import boto3
import botocore # type: ignore
import jmespath # type: ignore
from boto3.resources.base import ServiceResource
from boto3.resources.factory import ResourceFactory
from boto3.resources.model import Action, Collection
from boto3.utils import ServiceContext
from .boto3_helpers import _clean_boto3_metadata, get_shape
from .boto3_loaders import MergedServiceLoader, ResourceMap, ServiceMap, ServiceMappingLoader
from .cache_helpers import cached_property, memoized_method
from .cloud_wanderer_resource import SecondaryAttribute
from .exceptions import (
BadRequestError,
BadServiceMapError,
BadUrnAccountIdError,
BadUrnIdentifiersError,
BadUrnRegionError,
ResourceNotFoundError,
UnsupportedResourceTypeError,
)
from .urn import URN
logger = logging.getLogger(__name__)
class CloudWandererBoto3ResourceFactory:
    """Builds Boto3 Resource classes from raw resource-definition dictionaries."""

    def __init__(self, boto3_session: boto3.session.Session = None) -> None:
        """Initialise the ResourceFactory.

        Arguments:
            boto3_session (boto3.session.Session): The :class:`boto3.session.Session` object to use for any queries.
        """
        self.boto3_session = boto3_session or boto3.session.Session()
        # The factory needs the session's event emitter to wire up resources.
        self.emitter = self.boto3_session.events
        self.factory = ResourceFactory(self.emitter)

    def load(self, service_name: str, resource_definitions: dict, service_definition: dict) -> Type:
        """Build a Resource class from the supplied resource definition dictionaries.

        Arguments:
            service_name (str):
                The name of the service to load (e.g. ``'ec2'``)
            resource_definitions (dict):
                A dict describing the resource definitions
                (the ``'resources'`` key in each ``resource_definition`` json).
            service_definition (dict):
                A dict describing the service definition
                (the ``'service'`` key in each ``resource_definition`` json).
        """
        context = ServiceContext(
            service_name=service_name,
            resource_json_definitions=resource_definitions,
            service_model=self._get_service_model(service_name),
            service_waiter_model=None,
        )
        return self.factory.load_from_definition(
            resource_name=service_name,
            single_resource_json_definition=service_definition,
            service_context=context,
        )

    @memoized_method()
    def _get_service_model(self, service_name: str) -> botocore.model.ServiceModel:
        """Return the (memoized) botocore service model for this service.

        Arguments:
            service_name: The service name to get the service model of.
        """
        logger.debug("Getting service model for %s", service_name)
        service_client = self.boto3_session.client(service_name=service_name)  # type: ignore
        return service_client.meta.service_model
class Boto3Services:
    """Wraps Boto3 Session.

    Allows us to:

    1. Wrap Boto3 ServiceResource objects with CloudWandererBoto3Service objects.
    2. Inject custom service definitions into our :class:`CloudWandererBoto3ResourceFactory` \
        and return them alongside the default Boto3 ServiceResource objects.

    Used by :class:`~cloudwanderer.aws_interface.CloudWandererAWSInterface` to instantiate
    services and can be used to get resources from their URN.
    """

    def __init__(
        self,
        boto3_session: boto3.session.Session = None,
        service_loader: MergedServiceLoader = None,
        service_mapping_loader: ServiceMappingLoader = None,
        account_id: str = None,
    ) -> None:
        """Initialise Boto3Services.

        Arguments:
            boto3_session:
                The :class:`boto3.session.Session` object to use for any queries.
            service_loader:
                Optionally specify your own service loader if you wish to insert your own resources.
            service_mapping_loader:
                Optionally specify your own service mapping loader if you wish to insert your own service mappings.
            account_id:
                Optionally specify your account id to save a call to STS.
        """
        self.boto3_session = boto3_session or boto3.session.Session()
        self._factory = CloudWandererBoto3ResourceFactory(boto3_session=self.boto3_session)
        self._loader = service_loader or MergedServiceLoader()
        self._service_mapping_loader = service_mapping_loader or ServiceMappingLoader()
        # Resolved lazily by the account_id property when not supplied here.
        self._account_id = account_id

    @property
    def available_services(self) -> List[str]:
        """Return a list of service names that can be loaded by :meth:`Boto3Services.get_service`."""
        return self._loader.available_services

    @cached_property
    def account_id(self) -> str:
        """Return the AWS Account ID our Boto3 session is authenticated against.

        Falls back to (and caches) an STS ``GetCallerIdentity`` call when no
        account id was supplied at construction time.
        """
        if self._account_id:
            return self._account_id
        sts = self.boto3_session.client("sts")
        return sts.get_caller_identity()["Account"]

    def get_service(self, service_name: str, region_name: str = None, **kwargs) -> "CloudWandererBoto3Service":
        """Return the :class:`CloudWandererBoto3Service` of this name.

        Arguments:
            service_name: The name of the service to instantiate.
            region_name: The region to instantiate the service for.
            **kwargs: Additional keyword args will be passed to the Boto3 client.
        """
        service_method = self._get_service_method(service_name)
        return CloudWandererBoto3Service(
            boto3_service=service_method(client=self._get_client(service_name, region_name=region_name, **kwargs)),
            service_map=ServiceMap.factory(
                name=service_name,
                definition=self._service_mapping_loader.get_service_mapping(service_name=service_name),
            ),
            account_id=self.account_id,
            enabled_regions=self.enabled_regions,
            region_name=region_name,
            boto3_session=self.boto3_session,
        )

    def get_empty_service(self, service_name: str, region_name: str = None) -> "CloudWandererBoto3Service":
        """Return the :class:`CloudWandererBoto3Service` of this name without a Boto3 Client instantiated.

        Useful for querying service/resource metadata.

        Arguments:
            service_name: The name of the service to instantiate.
            region_name: The region to instantiate the service for.
        """
        logger.debug("Getting empty service for %s in %s", service_name, region_name)
        service_method = self._get_service_method(service_name)
        return CloudWandererBoto3Service(
            boto3_service=service_method(client=self._get_default_client(service_name)),
            service_map=self._get_service_map(service_name),
            account_id=self.account_id,
            enabled_regions=self.enabled_regions,
            region_name=region_name,
            boto3_session=self.boto3_session,
        )

    @memoized_method()
    def _get_service_map(self, service_name: str) -> ServiceMap:
        """Return the (memoized) CloudWanderer service map for this service."""
        logger.debug("Getting service map for %s", service_name)
        return ServiceMap.factory(
            name=service_name,
            definition=self._service_mapping_loader.get_service_mapping(service_name=service_name),
        )

    @memoized_method()
    def _get_default_client(self, service_name: str) -> botocore.client.BaseClient:
        """Return a (memoized) metadata-only client pinned to us-east-1."""
        logger.debug("Getting default client for %s", service_name)
        return self._get_client(service_name=service_name, region_name="us-east-1")

    def _get_client(self, service_name: str, region_name: str = None, **kwargs) -> botocore.client.BaseClient:
        """Return a new Boto3 client for this service/region."""
        return self.boto3_session.client(service_name, region_name=region_name, **kwargs)  # type: ignore

    @memoized_method()
    def _get_service_method(self, service_name: str) -> Type:
        """Return the (memoized) generated Resource class for this service."""
        logger.debug("Getting service_method for %s", service_name)
        service_definition = self._loader.get_service_definition(service_name=service_name)
        return self._factory.load(
            service_name=service_name,
            service_definition=service_definition["service"],
            resource_definitions=service_definition["resources"],
        )

    def get_resource_from_urn(self, urn: URN) -> "CloudWandererBoto3Resource":
        """Return the :class:`CloudWandererBoto3Resource` resource picked out by this urn.

        Arguments:
            urn (URN): The urn of the resource to get.

        Raises:
            BadUrnAccountIdError: When the account ID of the URN does not match the account id of the current session.
        """
        if urn.account_id != self.account_id:
            raise BadUrnAccountIdError(f"{urn} exists in an account other than the current one ({self.account_id}).")
        service = self.get_service(urn.service, urn.region)
        return service.get_resource_from_urn(urn)

    @cached_property
    def enabled_regions(self) -> List[str]:
        """Return a list of enabled regions in this account.

        Regions whose ``OptInStatus`` is ``not-opted-in`` are excluded.
        """
        regions = self.boto3_session.client("ec2").describe_regions()["Regions"]
        return [region["RegionName"] for region in regions if region["OptInStatus"] != "not-opted-in"]
class CloudWandererBoto3Service:
"""Wraps Boto3 :class:`~boto3.resources.base.ServiceResource` service-level objects.
Allows us to include additional CloudWanderer specific functionality.
The object represents an AWS service (e.g. ``ec2``) in a specific region.
Used to get resources from the API as well as get metadata about the resource type from Boto3.
"""
    def __init__(
        self,
        boto3_service: ServiceResource,
        service_map: ServiceMap,
        account_id: str,
        boto3_session: boto3.session.Session,
        region_name: str = None,
        enabled_regions: List[str] = None,
    ) -> None:
        """Instantiate CloudWandererBoto3Service.

        Arguments:
            boto3_service: The boto3 service object to wrap.
            service_map: The CloudWanderer service map that provides additional context about this service.
            account_id: The ID of the AWS account our session is in.
            boto3_session: The Boto3 session that created this client.
            region_name: The region to get resources from for this service.
            enabled_regions: The list of regions currently enabled.
        """
        self.boto3_service = boto3_service
        self.boto3_session = boto3_session
        self.service_map = service_map
        self.account_id = account_id
        self.region_name = region_name
        # Stored as provided; may be None (resolution is not visible here).
        self._enabled_regions = enabled_regions
@property
def resource_types(self) -> List[str]:
"""Return a list of snake_case resource types available on this service."""
collection_resource_types = [
collection.resource.type for collection in self._collections if collection.resource
]
return [
botocore.xform_name(resource.name)
for resource in self._subresources
if resource.resource and resource.resource.type in collection_resource_types
]
@property
def resource_summary(self) -> List["ResourceSummary"]:
"""Return a summary of resource types in this service."""
summaries = []
for resource_type in self.resource_types:
resource = self._get_empty_resource(resource_type)
if not resource:
logger.debug("No %s resource type found %s", resource_type, self.name)
continue
summaries.append(
ResourceSummary(
resource_type=resource_type,
resource_type_pascal=resource.resource_type_pascal,
service_friendly_name=self.friendly_name,
secondary_attribute_names=resource.secondary_attribute_names,
subresources=resource.subresource_summary,
)
)
return summaries
def _get_empty_resource(self, resource_type: str) -> Optional["CloudWandererBoto3Resource"]:
"""Return a resource object of resource_type which is not associated with a specific AWS resource.
Useful for interrogating metadata about that type of resource.
Arguments:
resource_type: The CloudWanderer style (snake_case) resource type.
"""
boto3_resource = self._get_boto3_resource(resource_type)
boto3_resource_getter = getattr(self.boto3_service, boto3_resource.name)
if not boto3_resource.resource:
return None
blank_args = ["" for identifier in boto3_resource.resource.identifiers]
return CloudWandererBoto3Resource(
account_id=self.account_id,
cloudwanderer_boto3_service=self,
boto3_resource=boto3_resource_getter(*blank_args),
service_map=self.service_map,
)
def get_resource_from_urn(self, urn: URN) -> "CloudWandererBoto3Resource":
"""Return the :class:`CloudWandererBoto3Resource` resource picked out by this urn.
Arguments:
urn (URN): The urn of the resource to get.
Raises:
BadRequestError: Occurs when the AWS API returns a 4xx HTTP error other than 404.
ResourceNotFoundError: Occurs when the AWS API Returns a 404 HTTP error.
UnsupportedResourceTypeError: Occurs when the definition for the resource does not support loading by id.
botocore.exceptions.ClientError: Boto3 Client Error
BadUrnIdentifiersError: Occurs when the URN contains fewer identifiers than is required by the reource type.
BadUrnRegionError: When the region of the URN is not possible with the service and/or resource type.
"""
boto3_service_resource = self._get_boto3_resource(urn.resource_type)
if not boto3_service_resource or not boto3_service_resource.resource:
raise UnsupportedResourceTypeError(f"Resource type {urn.resource_type} not found")
resource_map = self.service_map.get_resource_map(boto3_service_resource.name)
if self.service_map.global_service_region != urn.region and not resource_map.regional_resource:
raise BadUrnRegionError(f"{urn}'s service does not have resources in {urn.region}")
boto3_resource_getter = getattr(self.boto3_service, boto3_service_resource.name)
if len(urn.resource_id_parts) != len(boto3_service_resource.resource.identifiers):
raise BadUrnIdentifiersError(
f"An URN of | |
<gh_stars>100-1000
# -*- coding: utf-8 -*-
""" ALOHA: generalized implementation of the single-player policy from [Concurrent bandits and cognitive radio network, O.Avner & S.Mannor, 2014](https://arxiv.org/abs/1404.5421), for a generic single-player policy.
This policy uses the collision avoidance mechanism that is inspired by the classical ALOHA protocol, and any single-player policy.
"""
from __future__ import division, print_function # Python 2 compatibility
__author__ = "<NAME>"
__version__ = "0.9"
from random import random
import numpy as np
import numpy.random as rn
try:
from .BaseMPPolicy import BaseMPPolicy
from .ChildPointer import ChildPointer
from .with_proba import with_proba
except ImportError:
from BaseMPPolicy import BaseMPPolicy
from ChildPointer import ChildPointer
from with_proba import with_proba
# --- Functions to define [t, t + tnext] intervals
def tnext_beta(t, beta=0.5):
    r"""Power-law unavailability duration, as used in MEGA: ``upper_tnext(t)`` = :math:`t^{\beta}` (default :math:`t^{0.5}`).

    >>> tnext_beta(100, beta=0.1)  # doctest: +ELLIPSIS
    1.584...
    >>> tnext_beta(100, beta=0.5)
    10.0
    >>> tnext_beta(100, beta=0.9)  # doctest: +ELLIPSIS
    63.095...
    >>> tnext_beta(1000)  # doctest: +ELLIPSIS
    31.622...
    """
    return pow(t, beta)
def make_tnext_beta(beta=0.5):
    r"""Build and return the closure :math:`t \mapsto t^{\beta}`.

    >>> tnext = make_tnext_beta(0.5)
    >>> tnext(100)
    10.0
    >>> tnext(1000)  # doctest: +ELLIPSIS
    31.622...
    """
    # Keep the inner function named `tnext`: oneALOHA derives a display
    # name from __name__ for unrecognised ftnext callables.
    def tnext(t):
        """Evaluate :math:`t^{\\beta}` for the captured beta."""
        return pow(t, beta)
    return tnext
def tnext_log(t, scaling=1.):
    r"""Logarithmic unavailability duration (our proposal, not MEGA's): ``upper_tnext(t)`` = :math:`\text{scaling} * \log(1 + t)`.

    >>> tnext_log(100, scaling=1)  # doctest: +ELLIPSIS
    4.615...
    >>> tnext_log(100, scaling=10)  # doctest: +ELLIPSIS
    46.151...
    >>> tnext_log(100, scaling=100)  # doctest: +ELLIPSIS
    461.512...
    >>> tnext_log(1000)  # doctest: +ELLIPSIS
    6.908...
    """
    return np.log(t + 1) * scaling
def make_tnext_log_scaling(scaling=1.):
    r"""Build and return the closure :math:`t \mapsto \text{scaling} * \log(1 + t)`.

    >>> tnext = make_tnext_log_scaling(1)
    >>> tnext(100)  # doctest: +ELLIPSIS
    4.615...
    >>> tnext(1000)  # doctest: +ELLIPSIS
    6.908...
    """
    # Keep the inner function named `tnext`: oneALOHA derives a display
    # name from __name__ for unrecognised ftnext callables.
    def tnext(t):
        """Evaluate :math:`\\text{scaling} * \\log(1 + t)` for the captured scaling."""
        return np.log(t + 1) * scaling
    return tnext
# --- Class oneALOHA, for children
class oneALOHA(ChildPointer):
    """ Class that acts as a child policy, but in fact it pass all its method calls to the mother class, who passes it to its i-th player.

    - Except for the handleCollision method: the ALOHA collision avoidance protocol is implemented here.
    """

    def __init__(self, nbPlayers, mother, playerId, nbArms,
                 p0=0.5, alpha_p0=0.5, ftnext=tnext_beta, beta=None):
        super(oneALOHA, self).__init__(mother, playerId)
        self.nbPlayers = nbPlayers  #: Number of players
        # Parameters for the ALOHA protocol
        assert 0 <= p0 <= 1, "Error: parameter 'p0' for a ALOHA player should be in [0, 1]."  # DEBUG
        self.p0 = p0  #: Initial probability, should not be modified
        self.p = p0  #: Current probability, can be modified
        assert 0 < alpha_p0 <= 1, "Error: parameter 'alpha_p0' for a ALOHA player should be in (0, 1]."  # DEBUG
        self.alpha_p0 = alpha_p0  #: Parameter alpha for the recurrence equation for probability p(t)
        # Parameters for the ftnext function
        self.beta = beta  #: Parameter beta
        self._ftnext = ftnext  # Function to know how long arms are tagged as unavailable. Can be a callable or None
        # Find the name of the function
        # NOTE(review): this display-name logic branches on `ftnext is None`,
        # but self.ftnext() lets a non-None beta override ftnext — so e.g.
        # beta=0.3 with the default ftnext is *displayed* as sqrt(t). Also,
        # ftnext=None with beta=None raises TypeError on `beta > 1` in
        # Python 3 — confirm callers always supply one of the two.
        if ftnext is None:
            if beta > 1:
                self._ftnext_name = "t^{%.3g}" % beta
            elif 0 < beta < 1:
                self._ftnext_name = r"\sqrt[%.3g]{t}" % (1. / beta)
            else:
                self._ftnext_name = "t"
        elif ftnext == tnext_log:
            self._ftnext_name = r"\log(t)"
        elif ftnext == tnext_beta:
            self._ftnext_name = r"\sqrt{t}"
        else:
            self._ftnext_name = self._ftnext.__name__.replace("tnext_", "")
        # Internal memory
        self.tnext = np.zeros(nbArms, dtype=int)  #: Only store the delta time
        self.t = -1  #: Internal time
        self.chosenArm = None  #: Last chosen arm

    def __str__(self):
        return r"#{}<ALOHA({}, $p_0={:.3g}$, $\alpha={:.3g}$, $f(t)={}$)>".format(self.playerId + 1, self.mother._players[self.playerId], self.p0, self.alpha_p0, self._ftnext_name)

    def startGame(self):
        """Start game: reset time, persistence probability and arm bookkeeping."""
        self.mother._startGame_one(self.playerId)
        self.t = 0
        self.p = self.p0
        self.tnext.fill(0)
        self.chosenArm = None

    def ftnext(self, t):
        """Time until the arm is removed from list of unavailable arms.

        A non-None beta takes precedence over the ftnext callable.
        """
        if self.beta is not None:
            return t ** self.beta
        else:
            return self._ftnext(t)

    def getReward(self, arm, reward):
        """ Receive a reward on arm of index 'arm', as described by the ALOHA protocol.

        - If not collision, receive a reward after pulling the arm.
        """
        # print(" - A oneALOHA player received reward = {} on arm {}, at time t = {}...".format(reward, arm, self.t))  # DEBUG
        self.mother._getReward_one(self.playerId, arm, reward)
        # Persistence probability drifts toward 1 on each collision-free reward:
        # p <- alpha * p + (1 - alpha)
        self.p = self.p * self.alpha_p0 + (1 - self.alpha_p0)  # Update proba p

    def handleCollision(self, arm, reward=None):
        """ Handle a collision, on arm of index 'arm'.

        .. warning:: This method has to be implemented in the collision model, it is NOT implemented in the EvaluatorMultiPlayers.

        .. note:: We do not care on which arm the collision occured.
        """
        # print(" ---------> A oneALOHA player saw a collision on arm {}, at time t = {} ... Currently, p = {} ...".format(arm, self.t, self.p))  # DEBUG
        # self.getReward(arm, self.mother.lower)  # FIXED should we give a 0 reward ? Not in this model!
        # 1. With proba 1 - p, give up
        if with_proba(1 - self.p):
            # Random time offset until when this arm self.chosenArm is not sampled
            delta_tnext_k = rn.randint(low=0, high=1 + int(self.ftnext(self.t)))
            self.tnext[self.chosenArm] = self.t + 1 + delta_tnext_k
            # print(" - Reaction to collision on arm {}, at time t = {} : delta_tnext_k = {}, tnext[{}] = {} ...".format(arm, self.t, delta_tnext_k, self.chosenArm, self.tnext[self.chosenArm]))  # DEBUG
            self.p = self.p0  # Reinitialize the proba p
            self.chosenArm = None  # We give up this arm
        # 2. With proba p, persist: nothing to do
        # else:
        #     pass

    def choice(self):
        """ Identify the available arms, and use the underlying single-player policy (UCB, Thompson etc) to choose an arm from this sub-set of arms.
        """
        self.t += 1
        if self.chosenArm is not None:
            # We can still exploit that arm
            pass
        else:
            # We have to chose a new arm
            availableArms = np.nonzero(self.tnext <= self.t)[0]  # Identify available arms
            result = self.mother._choiceFromSubSet_one(self.playerId, availableArms)
            # print("\n - A oneALOHA player {} had to choose an arm among the set of available arms = {}, her choice was : {}, at time t = {} ...".format(self, availableArms, result, self.t))  # DEBUG
            self.chosenArm = result
        return self.chosenArm
# --- Class ALOHA
class ALOHA(BaseMPPolicy):
""" ALOHA: implementation of the multi-player policy from [Concurrent bandits and cognitive radio network, O.Avner & S.Mannor, 2014](https://arxiv.org/abs/1404.5421), for a generic single-player policy.
"""
def __init__(self, nbPlayers, nbArms, playerAlgo,
p0=0.5, alpha_p0=0.5, ftnext=tnext_beta, beta=None,
*args, **kwargs): # Named argument to give them in any order
"""
- nbPlayers: number of players to create (in self._players).
- playerAlgo: class to use for every players.
- nbArms: number of arms, given as first argument to playerAlgo.
- p0: initial probability p(0); p(t) is the probability of persistance on the chosenArm at time t
- alpha_p0: scaling in the update for p[t+1] <- alpha_p0 p[t] + (1 - alpha_p0)
- ftnext: general function, default to t -> t^beta, to know from where to sample a random time t_next(k), until when the chosenArm is unavailable. t -> log(1 + t) is also possible.
- (optional) beta: if present, overwrites ftnext, which will be t --> t^beta.
- `*args`, `**kwargs`: arguments, named arguments, given to playerAlgo.
Example:
>>> from Policies import *
>>> import random; random.seed(0); import numpy as np; np.random.seed(0)
>>> nbArms = 17
>>> nbPlayers = 6
>>> p0, alpha_p0 = 0.6, 0.5
>>> s = ALOHA(nbPlayers, nbArms, Thompson, p0=p0, alpha_p0=alpha_p0, ftnext=tnext_log)
>>> [ child.choice() for child in s.children ]
[6, 11, 8, 4, 8, 8]
>>> s = ALOHA(nbPlayers, nbArms, UCBalpha, p0=p0, alpha_p0=alpha_p0, beta=0.5, alpha=1)
>>> [ child.choice() for child in s.children ]
[1, 0, 5, 2, 15, 3]
- To get a list of usable players, use ``s.children``.
- Warning: ``s._players`` is for internal use ONLY!
"""
assert nbPlayers > 0, "Error, the parameter 'nbPlayers' for rhoRand class has to be > 0."
self.nbPlayers = nbPlayers #: Number of players
self.nbArms = nbArms #: Number of arms
# Internal memory
self._players = [None] * nbPlayers
self.children = [None] * nbPlayers #: List of children, fake algorithms
for playerId in range(nbPlayers):
# Initialize internal algorithm (eg. UCB, Thompson etc)
self._players[playerId] = playerAlgo(nbArms, *args, **kwargs)
# Initialize proxy child
self.children[playerId] = oneALOHA(nbPlayers, self, playerId, nbArms, p0=p0, alpha_p0=alpha_p0, ftnext=ftnext, | |
"""Agent message base class and schema."""
from collections import OrderedDict
from typing import Mapping, Union
import uuid
from marshmallow import (
EXCLUDE,
fields,
pre_load,
post_load,
pre_dump,
post_dump,
ValidationError,
)
from ..wallet.base import BaseWallet
from .decorators.base import BaseDecoratorSet
from .decorators.default import DecoratorSet
from .decorators.signature_decorator import SignatureDecorator
from .decorators.thread_decorator import ThreadDecorator
from .decorators.trace_decorator import (
TraceDecorator,
TraceReport,
TRACE_MESSAGE_TARGET,
TRACE_LOG_TARGET,
)
from .models.base import (
BaseModel,
BaseModelError,
BaseModelSchema,
resolve_class,
resolve_meta_property,
)
from .valid import UUIDFour
class AgentMessageError(BaseModelError):
    """Base exception for agent message construction, signing and schema issues."""
class AgentMessage(BaseModel):
"""Agent message base class."""
class Meta:
"""AgentMessage metadata."""
handler_class = None
schema_class = None
message_type = None
    def __init__(self, _id: str = None, _decorators: BaseDecoratorSet = None):
        """
        Initialize base agent message object.

        Args:
            _id: Agent message id; a new UUID4 is generated when omitted
            _decorators: Message decorators; a fresh DecoratorSet when omitted

        Raises:
            TypeError: If message type is missing on subclass Meta class

        """
        super().__init__()
        if _id:
            self._message_id = _id
            # Record that the id was caller-supplied, not freshly generated.
            self._message_new_id = False
        else:
            self._message_id = str(uuid.uuid4())
            self._message_new_id = True
        self._message_decorators = (
            _decorators if _decorators is not None else DecoratorSet()
        )
        # Subclasses must declare their message type; enforce it here since
        # this base class is effectively abstract.
        if not self.Meta.message_type:
            raise TypeError(
                "Can't instantiate abstract class {} with no message_type".format(
                    self.__class__.__name__
                )
            )
        # Not required for now
        # if not self.Meta.handler_class:
        #     raise TypeError(
        #         "Can't instantiate abstract class {} with no handler_class".format(
        #             self.__class__.__name__))
    @classmethod
    def _get_handler_class(cls):
        """
        Resolve and return the handler class named by `Meta.handler_class`.

        Returns:
            The resolved class defined on `Meta.handler_class`

        """
        return resolve_class(cls.Meta.handler_class, cls)
    @property
    def Handler(self) -> type:
        """
        Accessor for the agent message's handler class.

        Returns:
            Handler class resolved from `Meta.handler_class`

        """
        return self._get_handler_class()
    @property
    def _type(self) -> str:
        """
        Accessor for the message type identifier.

        Returns:
            Message type defined on `Meta.message_type`

        """
        return self.Meta.message_type
    @property
    def _id(self) -> str:
        """
        Accessor for the unique message identifier.

        Returns:
            The id of this message (caller-supplied or generated UUID4)

        """
        return self._message_id

    @_id.setter
    def _id(self, val: str):
        """Set the unique message identifier."""
        self._message_id = val
    @property
    def _decorators(self) -> BaseDecoratorSet:
        """Fetch the message's decorator set."""
        return self._message_decorators

    @_decorators.setter
    def _decorators(self, value: BaseDecoratorSet):
        """Replace the message's decorator set."""
        self._message_decorators = value
def get_signature(self, field_name: str) -> SignatureDecorator:
"""
Get the signature for a named field.
Args:
field_name: Field name to get the signature for
Returns:
A SignatureDecorator for the requested field name
"""
return self._decorators.field(field_name).get("sig")
def set_signature(self, field_name: str, signature: SignatureDecorator):
"""
Add or replace the signature for a named field.
Args:
field_name: Field to set signature on
signature: Signature for the field
"""
self._decorators.field(field_name)["sig"] = signature
async def sign_field(
self, field_name: str, signer_verkey: str, wallet: BaseWallet, timestamp=None
) -> SignatureDecorator:
"""
Create and store a signature for a named field.
Args:
field_name: Field to sign
signer_verkey: Verkey of signer
wallet: Wallet to use for signature
timestamp: Optional timestamp for signature
Returns:
A SignatureDecorator for newly created signature
Raises:
ValueError: If field_name doesn't exist on this message
"""
value = getattr(self, field_name, None)
if value is None:
raise BaseModelError(
"{} field has no value for signature: {}".format(
self.__class__.__name__, field_name
)
)
sig = await SignatureDecorator.create(value, signer_verkey, wallet, timestamp)
self.set_signature(field_name, sig)
return sig
async def verify_signed_field(
    self, field_name: str, wallet: BaseWallet, signer_verkey: str = None
) -> str:
    """
    Verify a specific field signature.

    Args:
        field_name: The field name to verify
        wallet: Wallet to use for the verification
        signer_verkey: Optional verkey the signature is expected to come from

    Returns:
        The verkey of the signer

    Raises:
        BaseModelError: If there is no signature stored for the field
        BaseModelError: If the signature verification fails
        BaseModelError: If the verkey of the signature does not match the
            provided verkey

    """
    sig = self.get_signature(field_name)
    if not sig:
        raise BaseModelError("Missing field signature: {}".format(field_name))
    if not await sig.verify(wallet):
        raise BaseModelError(
            "Field signature verification failed: {}".format(field_name)
        )
    # Only enforce the signer check when a specific verkey was requested
    if signer_verkey is not None and sig.signer != signer_verkey:
        raise BaseModelError(
            "Signer verkey of signature does not match: {}".format(field_name)
        )
    return sig.signer
async def verify_signatures(self, wallet: BaseWallet) -> bool:
"""
Verify all associated field signatures.
Args:
wallet: Wallet to use in verification
Returns:
True if all signatures verify, else false
"""
for field in self._decorators.fields.values():
if "sig" in field and not await field["sig"].verify(wallet):
return False
return True
@property
def _thread(self) -> ThreadDecorator:
    """
    Accessor for the message's thread decorator.

    Returns:
        The ThreadDecorator for this message, or None if unset

    """
    return self._decorators.get("thread")

@_thread.setter
def _thread(self, val: Union[ThreadDecorator, dict]):
    """
    Setter for the message's thread decorator.

    Args:
        val: ThreadDecorator or dict to set as the thread; None removes
            any existing thread decorator

    """
    if val is None:
        self._decorators.pop("thread", None)
    else:
        self._decorators["thread"] = val

@property
def _thread_id(self) -> str:
    """Accessor for the thread ID associated with this message."""
    # Fall back to this message's own id when no thread id is set
    if self._thread and self._thread.thid:
        return self._thread.thid
    return self._message_id
def assign_thread_from(self, msg: "AgentMessage"):
    """
    Copy thread information from a previous message.

    The thread id falls back to the prior message's own id when that
    message carries no thread decorator (or an empty thid).

    Args:
        msg: The received message containing optional thread information

    """
    if msg:
        thread = msg._thread
        # Replaces the error-prone `x and a or b` idiom with explicit logic
        if thread and thread.thid:
            thid = thread.thid
        else:
            thid = msg._message_id
        pthid = thread.pthid if thread else None
        self.assign_thread_id(thid, pthid)
def assign_thread_id(self, thid: str, pthid: str = None):
    """
    Assign a specific thread ID.

    Args:
        thid: The thread identifier
        pthid: The parent thread identifier

    """
    if thid or pthid:
        self._thread = ThreadDecorator(thid=thid, pthid=pthid)
    else:
        # Both ids empty: remove any existing thread decorator
        self._thread = None
@property
def _trace(self) -> TraceDecorator:
    """
    Accessor for the message's trace decorator.

    Returns:
        The TraceDecorator for this message, or None if unset

    """
    return self._decorators.get("trace")

@_trace.setter
def _trace(self, val: Union[TraceDecorator, dict]):
    """
    Setter for the message's trace decorator.

    Args:
        val: TraceDecorator or dict to set as the trace; None removes
            any existing trace decorator

    """
    if val is None:
        self._decorators.pop("trace", None)
    else:
        self._decorators["trace"] = val
def assign_trace_from(self, msg: "AgentMessage"):
    """
    Copy trace information from a previous message.

    Args:
        msg: The received message containing optional trace information

    """
    if msg and msg._trace:
        # ignore if not a valid type; tuple form of isinstance replaces
        # the redundant `isinstance(..) or isinstance(..)` chain
        if isinstance(msg._trace, (TraceDecorator, dict)):
            self._trace = msg._trace
def assign_trace_decorator(self, context, trace):
    """
    Copy trace from a json structure.

    Args:
        context: Optional mapping consulted for a "trace.target" override
        trace: string containing trace json stucture

    """
    if trace:
        # NOTE(review): if a context is given but has no "trace.target"
        # key, target becomes None rather than TRACE_LOG_TARGET - confirm
        # this is intended
        self.add_trace_decorator(
            target=context.get("trace.target") if context else TRACE_LOG_TARGET,
            full_thread=True,
        )
def add_trace_decorator(
    self, target: str = TRACE_LOG_TARGET, full_thread: bool = True
):
    """
    Create a new trace decorator.

    Args:
        target: The trace target
        full_thread: Full thread flag

    """
    if self._trace:
        # don't replace if there is already a trace decorator
        # (potentially holding trace reports already)
        self._trace._target = target
        self._trace._full_thread = full_thread
    else:
        self._trace = TraceDecorator(target=target, full_thread=full_thread)
def add_trace_report(self, val: Union[TraceReport, dict]):
    """
    Append a new trace report.

    Args:
        val: The trace report (or dict) to append

    """
    # Lazily create a trace decorator targeting the message itself
    if not self._trace:
        self.add_trace_decorator(target=TRACE_MESSAGE_TARGET, full_thread=True)
    self._trace.append_trace_report(val)
class AgentMessageSchema(BaseModelSchema):
    """AgentMessage schema."""

    class Meta:
        """AgentMessageSchema metadata."""

        # Concrete schemas must set model_class; signed_fields lists the
        # field names that are required to carry signatures
        model_class = None
        signed_fields = None
        unknown = EXCLUDE

    # Avoid clobbering keywords
    _type = fields.Str(
        data_key="@type",
        dump_only=True,
        required=False,
        description="Message type",
        example="did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/my-family/1.0/my-message-type",
    )
    _id = fields.Str(
        data_key="@id",
        required=False,
        description="Message identifier",
        example=UUIDFour.EXAMPLE,
    )
def __init__(self, *args, **kwargs):
    """
    Initialize an instance of AgentMessageSchema.

    Raises:
        TypeError: If Meta.model_class has not been set

    """
    super().__init__(*args, **kwargs)
    # Decorators and signatures collected during load, transferred to the
    # message object in the post_load hook
    self._decorators = DecoratorSet()
    self._decorators_dict = None
    self._signatures = {}
@pre_load
def extract_decorators(self, data: Mapping, **kwargs):
    """
    Pre-load hook to extract the decorators and check the signed fields.

    Args:
        data: Incoming data to parse

    Returns:
        Parsed and modified data

    Raises:
        ValidationError: If a field signature does not correlate
            to a field in the message
        ValidationError: If the message defines both a field signature
            and a value for the same field
        ValidationError: If there is a missing field signature

    """
    # Strip decorator entries out of the raw payload; the remaining keys
    # are the plain message fields
    processed = self._decorators.extract_decorators(data, self.__class__)
    # Fields that Meta.signed_fields declares must carry signatures
    expect_fields = resolve_meta_property(self, "signed_fields") or ()
    found_signatures = {}
    for field_name, field in self._decorators.fields.items():
        if "sig" in field:
            if field_name not in expect_fields:
                raise ValidationError(
                    f"Encountered unexpected field signature: {field_name}"
                )
            if field_name in processed:
                raise ValidationError(
                    f"Message defines both field signature and value: {field_name}"
                )
            found_signatures[field_name] = field["sig"]
            # Replace the signature with its decoded value so normal field
            # loading can proceed
            processed[field_name], _ = field["sig"].decode()  # _ = timestamp
    for field_name in expect_fields:
        if field_name not in found_signatures:
            raise ValidationError(f"Expected field signature: {field_name}")
    return processed
@post_load
def populate_decorators(self, obj, **kwargs):
    """
    Post-load hook to populate decorators on the message.

    Args:
        obj: The AgentMessage object

    Returns:
        The AgentMessage object with populated decorators

    """
    # Transfer the decorator set collected in extract_decorators
    obj._decorators = self._decorators
    return obj
@pre_dump
def check_dump_decorators(self, obj, **kwargs):
"""
Pre-dump hook to validate and load the message decorators.
Args:
obj: The AgentMessage object
Raises:
BaseModelError: If a decorator does not validate
"""
decorators = obj._decorators.copy()
signatures = OrderedDict()
for name, field in decorators.fields.items():
if "sig" in field:
| |
<filename>cosmosis/main.py
#!/usr/bin/env python
import sys
import configparser
import argparse
import os
import pdb
from .runtime.config import Inifile, CosmosisConfigurationError
from .runtime.pipeline import LikelihoodPipeline
from .runtime import mpi_pool
from .runtime import process_pool
from .runtime.utils import ParseExtraParameters, stdout_redirected, import_by_path
from .samplers.sampler import Sampler, ParallelSampler, Hints
from . import output as output_module
from .runtime.handler import activate_segfault_handling
RUNTIME_INI_SECTION = "runtime"
def demo_1_special (args):
    """Print a congratulatory follow-up message after running demo 1."""
    if "demo1.ini" in args.inifile:
        print()
        print("Congratulations: you have just run cosmosis demo one!")
        # presumably "./conda" marks a conda-bundled install, which needs
        # the extra Abort Trap 6 hint - TODO confirm
        if os.path.exists("./conda"):
            print()
            print("You can make plots of the outputs of this using this command:")
            print("  postprocess demos/demo1.ini -o plots -p demo1")
            print()
            print("If you get a message about 'Abort Trap 6' then see the FAQ:")
            print("https://bitbucket.org/joezuntz/cosmosis/wiki/FAQ")
            print()
            print("Then you can try out the other demos...")
            print("... and read the information about plotting their output and what they are doing online.")
            print("Please get in touch with any problems, ideally by filing an Issue. Thanks!")
        else:
            print("You can make plots of the outputs of this using the command:")
            print()
            print("postprocess demos/demo1.ini -o plots -p demo1")
            print()
            print("Then you can try out the other demos...")
            print("... and read the information about plotting their output and what they are doing online.")
            print("Please get in touch with any problems, ideally by filing an Issue. Thanks!")
            print()
def demo_10_special (args):
    """Default the HALOFIT environment variable when running demo 10."""
    # Only act when demo10 is being run and the user has not chosen a flavour
    if "demo10.ini" in args.inifile and not os.getenv ("HALOFIT", ""):
        print()
        print("Welcome to demo10!")
        print()
        print("**PLEASE NOTE**:")
        print()
        print("There are two flavours of this demo, selected through an ")
        print("environment variable called `HALOFIT'; this variable is not ")
        print("currently set, so we are giving it the value `halofit'.")
        print("Please see the wiki for more information: ")
        print("https://bitbucket.org/joezuntz/cosmosis/wiki/Demo10.")
        os.environ ["HALOFIT"] = "halofit"
def demo_20a_special(args):
    """Print a follow-up hint after a successful demo20a run."""
    if "demo20a.ini" not in args.inifile:
        return
    print()
    print("You have completed demo20a, now run demo20b and compare")
    print("results with demo5!")
def demo_20b_special(args):
    """Warn the user when demo20b is run without the demo20a output file."""
    missing_prerequisite = (
        "demo20b.ini" in args.inifile
        and not os.path.isfile("./demo20a.txt")
    )
    if missing_prerequisite:
        print()
        print("********************************************************")
        print("*** YOU MUST RUN demo20a BEFORE YOU CAN RUN demo20b. ***")
        print("********************************************************")
def sampler_main_loop(sampler, output, pool, is_root):
    """Drive the sampler to convergence on the root process.

    Non-root processes of a parallel sampler instead enter the worker
    loop and wait for work from the root.

    Args:
        sampler: The configured Sampler instance to run
        output: Output object to flush after each step, or None
        pool: Parallel pool, or None when running serially
        is_root: True on the master process (or when there is no pool)
    """
    # Run the sampler until convergence
    # which really means "finished" here -
    # a sampler can "converge" just by reaching the
    # limit of the number of samples it is allowed.
    if is_root:
        while not sampler.is_converged():
            sampler.execute()
            #Flush any output. This is to stop
            #a problem in some MPI cases where loads
            #of output is built up before being written.
            if output:
                output.flush()
        # If we are in parallel tell the other processors to end the
        # loop and prepare for the next sampler
        if pool and sampler.is_parallel_sampler:
            pool.close()
    else:
        if sampler.is_parallel_sampler:
            sampler.worker()
def write_header_output(output, params, values, pipeline):
    """Embed the params, values, and priors ini files in the output header.

    Each ini file is written as comment lines delimited by START_OF/END_OF
    markers so that postprocessing tools can recover it later.

    Args:
        output: Output object supporting comment() and comment_file_wrapper()
        params: The main parameters Inifile
        values: The values Inifile, or None to re-read it from the pipeline
        pipeline: The LikelihoodPipeline, used for values/priors filenames
    """
    # If there is an output file, save the ini information to
    # it as well. We do it here since it's nicer to have it
    # after the sampler options that get written in sampler.config.
    output.comment("START_OF_PARAMS_INI")
    comment_wrapper = output.comment_file_wrapper()
    params.write(comment_wrapper)
    output.comment("END_OF_PARAMS_INI")

    # Do the same with the values file.
    # Unfortunately that means reading it in again;
    # if we ever refactor this bit we could eliminate that.
    # (The previous isinstance(values, Inifile) branch was redundant:
    # any non-None values object was used as-is in both branches.)
    values_ini = Inifile(pipeline.values_filename) if values is None else values

    output.comment("START_OF_VALUES_INI")
    values_ini.write(comment_wrapper)
    output.comment("END_OF_VALUES_INI")

    # And the same with the priors
    output.comment("START_OF_PRIORS_INI")
    for priors_file in pipeline.priors_files:
        # priors_files entries may be Inifile objects or filenames
        prior_ini = priors_file if isinstance(priors_file, Inifile) else Inifile(priors_file)
        prior_ini.write(comment_wrapper)
    output.comment("END_OF_PRIORS_INI")
def setup_output(sampler_class, sampler_number, ini, pool, number_samplers, sample_method, resume):
    """Create the output object for one sampling step, or return None.

    Returns None when this process should not write output (e.g. a
    non-master process for a sampler without parallel output).

    Args:
        sampler_class: The Sampler subclass about to be run
        sampler_number: Index of this sampler in the sequence being run
        ini: The main Inifile; must contain an [output] section if needed
        pool: Parallel pool, or None when running serially
        number_samplers: Total number of samplers being run
        sample_method: Sampler name as given in the ini file
        resume: Whether to resume from an existing output file

    Raises:
        ValueError: If the sampler needs output but the ini file has no
            [output] section
    """
    needs_output = sampler_class.needs_output and \
        (pool is None or pool.is_master() or sampler_class.parallel_output)

    if not needs_output:
        return None

    # create the output files and methods.
    try:
        output_options = dict(ini.items('output'))
    except configparser.NoSectionError:
        raise ValueError("ERROR:\nFor the sampler (%s) you chose in the [runtime] section of the ini file I also need an [output] section describing how to save results\n\n"%sample_method)

    # Additionally we tell the output here if
    # we are parallel or not.
    if (pool is not None) and (sampler_class.parallel_output):
        output_options['rank'] = pool.rank
        output_options['parallel'] = pool.size

    # Give different output filenames to the different sampling steps
    # Only change if this is not the last sampling step - the final
    # one retains the name in the output file.
    # Change, e.g. demo17.txt to demo17.fisher.txt
    if ("filename" in output_options) and (sampler_number < number_samplers - 1):
        # BUG FIX: sampler_name was previously referenced here without being
        # defined in this function (NameError when running multiple samplers
        # with a filename). Derive it the same way run_cosmosis does.
        sampler_name = sampler_class.__name__[:-len("Sampler")].lower()
        filename = output_options['filename']
        filename, ext = os.path.splitext(filename)
        filename += '.' + sampler_name
        filename += ext
        output_options['filename'] = filename

    # Generate the output from a factory
    output = output_module.output_from_options(output_options, resume)
    output.metadata("sampler", sample_method)

    if ("filename" in output_options):
        print("* Saving output -> {}".format(output_options['filename']))

    return output
def run_cosmosis(args, pool=None, ini=None, pipeline=None, values=None):
# In case we need to hand-hold a naive demo-10 user.
# Load configuration.
is_root = (pool is None) or pool.is_master()
if ini is None:
ini = Inifile(args.inifile, override=args.params, print_include_messages=is_root)
pre_script = ini.get(RUNTIME_INI_SECTION, "pre_script", fallback="")
post_script = ini.get(RUNTIME_INI_SECTION, "post_script", fallback="")
if is_root:
# This decodes the exist status
status = os.WEXITSTATUS(os.system(pre_script))
if status:
raise RuntimeError("The pre-run script {} retuned non-zero status {}".format(
pre_script, status))
if is_root and args.mem:
from cosmosis.runtime.memmon import MemoryMonitor
# This launches a memory monitor that prints out (from a new thread)
# the memory usage every args.mem seconds
mem = MemoryMonitor.start_in_thread(interval=args.mem)
# Create pipeline.
if pipeline is None:
cleanup_pipeline = True
pool_stdout = ini.getboolean(RUNTIME_INI_SECTION, "pool_stdout", fallback=False)
if is_root or pool_stdout:
pipeline = LikelihoodPipeline(ini, override=args.variables, values=values, only=args.only)
else:
# Suppress output on everything except the master process
if pool_stdout:
pipeline = LikelihoodPipeline(ini, override=args.variables, only=args.only)
else:
with stdout_redirected():
pipeline = LikelihoodPipeline(ini, override=args.variables, only=args.only)
if pipeline.do_fast_slow:
pipeline.setup_fast_subspaces()
else:
# We should not cleanup a pipeline which we didn't make
cleanup_pipeline = False
# This feature lets us import additional samplers at runtime
sampler_files = ini.get(RUNTIME_INI_SECTION, "import_samplers", fallback="").split()
for i, sampler_file in enumerate(sampler_files):
# give the module a new name to avoid name clashes if people
# just call their thing by the same name
import_by_path('additional_samplers_{}'.format(i), sampler_file)
# determine the type(s) of sampling we want.
sample_methods = ini.get(RUNTIME_INI_SECTION, "sampler", fallback="test").split()
for sample_method in sample_methods:
if sample_method not in Sampler.registry:
raise ValueError("Unknown sampler method %s" % (sample_method,))
#Get that sampler from the system.
sampler_classes = [Sampler.registry[sample_method] for sample_method in sample_methods]
if pool:
if not any(issubclass(sampler_class,ParallelSampler) for sampler_class in sampler_classes):
if len(sampler_classes)>1:
raise ValueError("None of the samplers you chose support parallel execution!")
else:
raise ValueError("The sampler you chose does not support parallel execution!")
for sampler_class in sampler_classes:
if isinstance(pool, process_pool.Pool) and issubclass(sampler_class,ParallelSampler) and not sampler_class.supports_smp:
name = sampler_class.__name__[:-len("Sampler")].lower()
raise ValueError("Sorry, the {} sampler does not support the --smp flag.".format(name))
number_samplers = len(sampler_classes)
#To start with we do not have any estimates of
#anything the samplers might give us like centers
#or covariances.
distribution_hints = Hints()
#Now that we have a sampler we know whether we will need an
#output file or not. By default new samplers do need one.
for sampler_number, (sampler_class, sample_method) in enumerate(
zip(sampler_classes, sample_methods)):
sampler_name = sampler_class.__name__[:-len("Sampler")].lower()
# The resume feature lets us restart from an existing file.
# It's not fully rolled out to all the suitable samplers yet though.
resume = ini.getboolean(RUNTIME_INI_SECTION, "resume", fallback=False)
# Not all samplers can be resumed.
if resume and not sampler_class.supports_resume:
print("NOTE: You set resume=T in the [runtime] section but the sampler {} does not support resuming yet. I will ignore this option.".format(sampler_name))
resume=False
if is_root:
print("****************************")
print("* Running sampler {}/{}: {}".format(sampler_number+1,number_samplers, sampler_name))
output = setup_output(sampler_class, sampler_number, ini, pool, number_samplers, sample_method, resume)
print("****************************")
#Initialize our sampler, with the class we got above.
#It needs an extra pool argument if it is a ParallelSampler.
#All the parallel samplers can also act serially too.
if pool and sampler_class.is_parallel_sampler:
sampler = sampler_class(ini, pipeline, output, pool)
else:
sampler = sampler_class(ini, pipeline, output)
#Set up the sampler - for example loading
#any resources it needs or checking the ini file
#for additional parameters.
sampler.distribution_hints.update(distribution_hints)
sampler.config()
# Potentially resume
if resume and sampler_class.needs_output and \
sampler_class.supports_resume and \
(is_root or sampler_class.parallel_output):
sampler.resume()
if output:
write_header_output(output, ini, values, pipeline)
sampler_main_loop(sampler, output, pool, is_root)
distribution_hints.update(sampler.distribution_hints)
if output:
output.close()
if cleanup_pipeline:
pipeline.cleanup()
if is_root and args.mem:
mem.stop()
# Extra-special actions we take to mollycoddle a brand-new user!
demo_1_special (args)
demo_20a_special (args)
# User can specify in the runtime section a post-run | |
import re
import transaction
from collections import namedtuple, defaultdict
from onegov.core.security import Public, Private, Secret
from onegov.core.utils import render_file
from onegov.directory import Directory
from onegov.directory import DirectoryCollection
from onegov.directory import DirectoryEntry
from onegov.directory import DirectoryZipArchive
from onegov.directory.archive import DirectoryFileNotFound
from onegov.directory.errors import DuplicateEntryError
from onegov.directory.errors import MissingColumnError
from onegov.directory.errors import MissingFileError
from onegov.directory.errors import ValidationError
from onegov.form import FormCollection, as_internal_id
from onegov.form.errors import InvalidFormSyntax, MixedTypeError, \
DuplicateLabelError
from onegov.form.fields import UploadField
from onegov.org import OrgApp, _
from onegov.org.forms import DirectoryForm, DirectoryImportForm
from onegov.org.forms.generic import ExportForm
from onegov.org.layout import DirectoryCollectionLayout
from onegov.org.layout import DirectoryEntryCollectionLayout
from onegov.org.layout import DirectoryEntryLayout
from onegov.org.models import DirectorySubmissionAction
from onegov.org.models import ExtendedDirectory, ExtendedDirectoryEntry
from onegov.core.elements import Link
from purl import URL
from tempfile import NamedTemporaryFile
from webob.exc import HTTPForbidden
from onegov.org.models.directory import ExtendedDirectoryEntryCollection
def get_directory_form_class(model, request):
    """Return the DirectoryForm extended with the directory content extensions."""
    return ExtendedDirectory().with_content_extensions(DirectoryForm, request)
def get_directory_entry_form_class(model, request):
    """Return the entry form class, hiding map/publication fields as configured."""
    form_class = ExtendedDirectoryEntry().with_content_extensions(
        model.directory.form_class, request)

    class OptionalMapPublicationForm(form_class):
        def on_request(self):
            # Drop the coordinates field when the directory has no map
            if model.directory.enable_map == 'no':
                self.delete_field('coordinates')
            # Publication window is admin-only unless explicitly enabled
            if not model.directory.enable_publication and not request.is_admin:
                self.delete_field('publication_start')
                self.delete_field('publication_end')

    return OptionalMapPublicationForm
def get_submission_form_class(model, request):
    """Return the form class used for brand-new entry submissions."""
    return model.directory.form_class_for_submissions(change_request=False)
def get_change_request_form_class(model, request):
    """Return the form class used for change requests on existing entries."""
    return model.directory.form_class_for_submissions(change_request=True)
@OrgApp.html(
    model=DirectoryCollection,
    template='directories.pt',
    permission=Public)
def view_directories(self, request, layout=None):
    """Render the list of directories visible to the current user."""
    return {
        'title': _("Directories"),
        'layout': layout or DirectoryCollectionLayout(self, request),
        'directories': request.exclude_invisible(self.query()),
        # Managers may also see unpublished entries behind each link
        'link': lambda directory: request.link(
            ExtendedDirectoryEntryCollection(
                directory,
                published_only=not request.is_manager
            )
        )
    }
@OrgApp.view(
    model=Directory,
    permission=Public)
def view_directory_redirect(self, request):
    """Redirect a bare directory link to its entry collection view."""
    return request.redirect(request.class_link(
        ExtendedDirectoryEntryCollection, {'directory_name': self.name}
    ))
@OrgApp.form(model=DirectoryCollection, name='new', template='form.pt',
             permission=Secret, form=get_directory_form_class)
def handle_new_directory(self, request, form, layout=None):
    """Create a new directory from the submitted form."""
    if form.submitted(request):
        try:
            directory = self.add_by_form(form, properties=('configuration', ))
        except DuplicateEntryError as e:
            # Roll back the partially added directory and return to the list
            request.alert(_("The entry ${name} exists twice", mapping={
                'name': e.name
            }))
            transaction.abort()
            return request.redirect(request.link(self))

        request.success(_("Added a new directory"))
        return request.redirect(
            request.link(ExtendedDirectoryEntryCollection(directory)))

    layout = layout or DirectoryCollectionLayout(self, request)
    layout.breadcrumbs = [
        Link(_("Homepage"), layout.homepage_url),
        Link(_("Directories"), request.link(self)),
        Link(_("New"), request.link(self, name='new'))
    ]

    return {
        'layout': layout,
        'title': _("New Directory"),
        'form': form,
        'form_width': 'huge',
    }
@OrgApp.form(model=ExtendedDirectoryEntryCollection, name='edit',
             template='directory_form.pt', permission=Secret,
             form=get_directory_form_class)
def handle_edit_directory(self, request, form, layout=None):
    """Edit a directory's structure and configuration.

    When the directory already has entries, a structure change is first
    turned into a migration; incompatible migrations are rejected and
    compatible ones require an explicit confirmation round-trip.
    """
    migration = None
    error = None

    try:
        if form.submitted(request):
            save_changes = True

            if self.directory.entries:
                migration = self.directory.migration(
                    form.structure.data,
                    form.configuration
                )

                if migration.changes:
                    if not migration.possible:
                        save_changes = False
                        request.alert(_(
                            "The requested change cannot be performed, "
                            "as it is incompatible with existing entries"
                        ))
                    else:
                        # First submit shows the migration preview; the
                        # confirm flag is set on resubmission
                        if not request.params.get('confirm'):
                            form.action += '&confirm=1'
                            save_changes = False

            if save_changes:
                form.populate_obj(self.directory)

                try:
                    self.session.flush()
                except ValidationError as e:
                    # An existing entry no longer validates against the
                    # new structure - abort and point at the entry
                    error = e
                    error.link = request.class_link(DirectoryEntry, {
                        'directory_name': self.directory.name,
                        'name': e.entry.name
                    })
                    transaction.abort()
                else:
                    request.success(_("Your changes were saved"))
                    return request.redirect(request.link(self))

        elif not request.POST:
            form.process(obj=self.directory)
    except InvalidFormSyntax as e:
        request.warning(
            _("Syntax Error in line ${line}", mapping={'line': e.line})
        )
    except AttributeError:
        request.warning(_("Syntax error in form"))
    except MixedTypeError as e:
        request.warning(
            _("Syntax error in field ${field_name}",
              mapping={'field_name': e.field_name})
        )
    except DuplicateLabelError as e:
        request.warning(
            _("Error: Duplicate label ${label}", mapping={'label': e.label})
        )

    layout = layout or DirectoryCollectionLayout(self, request)
    layout.breadcrumbs = [
        Link(_("Homepage"), layout.homepage_url),
        Link(_("Directories"), request.link(self)),
        Link(_(self.directory.title), request.link(self)),
        Link(_("Edit"), '#')
    ]

    return {
        'layout': layout,
        'title': self.directory.title,
        'form': form,
        'form_width': 'large',
        'migration': migration,
        'model': self,
        'error': error,
        'error_translate': lambda text: request.translate(_(text)),
        'directory': self.directory,
    }
@OrgApp.view(
    model=ExtendedDirectoryEntryCollection,
    permission=Secret,
    request_method='DELETE')
def delete_directory(self, request):
    """Delete the directory together with all of its entries."""
    request.assert_valid_csrf_token()

    session = request.session

    # Remove the entries first, then the directory itself
    for entry in self.directory.entries:
        session.delete(entry)

    DirectoryCollection(session).delete(self.directory)
    request.success(_("The directory was deleted"))
def get_filters(request, self, keyword_counts=None, view_name=None):
    """Build the sidebar filter links for a directory entry collection.

    Args:
        request: The current request
        self: The ExtendedDirectoryEntryCollection being filtered
        keyword_counts: Optional field_id -> {value -> count} mapping used
            to append occurrence counts to the link titles
        view_name: Optional view name appended to the generated links

    Returns:
        A list of Filter namedtuples (title, tags) where tags are Links
    """
    Filter = namedtuple('Filter', ('title', 'tags'))
    filters = []
    empty = tuple()

    # Radio fields are single-select and rendered rounded
    radio_fields = set(
        f.id for f in self.directory.fields if f.type == 'radio'
    )

    def link_title(field_id, value):
        if keyword_counts is None:
            return value
        count = keyword_counts.get(field_id, {}).get(value, 0)
        return f'{value} ({count})'

    for keyword, title, values in self.available_filters(sort_choices=False):
        filters.append(Filter(title=title, tags=tuple(
            Link(
                text=link_title(keyword, value),
                active=value in self.keywords.get(keyword, empty),
                url=request.link(self.for_filter(
                    singular=keyword in radio_fields,
                    **{keyword: value}
                ), name=view_name),
                rounded=keyword in radio_fields
            ) for value in values
        )))

    return filters
def keyword_count(request, collection):
    """Count keyword values over the visible, unfiltered directory entries.

    Returns a dict mapping field id -> {value -> occurrence count} for the
    keyword fields configured on the directory.
    """
    directory = collection.directory
    configured = tuple(
        as_internal_id(keyword)
        for keyword in directory.configuration.keywords or tuple()
    )
    keyword_fields = {
        field.id: field
        for field in directory.fields
        if field.id in configured
    }

    counts = {}
    visible = request.exclude_invisible(collection.without_keywords().query())
    for entry in visible:
        for raw_keyword in entry.keywords:
            field_id, value = raw_keyword.split(':', 1)
            if field_id not in keyword_fields:
                continue
            per_field = counts.setdefault(field_id, defaultdict(int))
            per_field[value] += 1
    return counts
@OrgApp.html(
    model=ExtendedDirectoryEntryCollection,
    permission=Public,
    template='directory.pt')
def view_directory(self, request, layout=None):
    """Render a directory's entries with filters and map/thumbnail info."""
    entries = request.exclude_invisible(self.query())
    # Counts are computed without the active filters so every filter link
    # can show its total
    keyword_counts = keyword_count(request, self)
    filters = get_filters(request, self, keyword_counts)
    layout = layout or DirectoryEntryCollectionLayout(self, request)

    return {
        'layout': layout,
        'title': self.directory.title,
        'entries': entries,
        'directory': self.directory,
        'searchwidget': self.searchwidget,
        'filters': filters,
        'geojson': request.link(self, name='+geojson'),
        'submit': request.link(self, name='+submit'),
        'show_thumbnails': layout.thumbnail_field_id and True or False,
        'thumbnail_link': layout.thumbnail_link
    }
@OrgApp.json(
    model=ExtendedDirectoryEntryCollection,
    permission=Public,
    name='geojson')
def view_geojson(self, request):
    """Return the entries with coordinates as a tuple of GeoJSON Features.

    Non-managers only see entries whose access is 'public' (or unset).
    """
    q = self.query()
    q = q.with_entities(
        DirectoryEntry.id,
        DirectoryEntry.name,
        DirectoryEntry.title,
        DirectoryEntry.lead,
        DirectoryEntry.content["coordinates"]["lat"].label('lat'),
        DirectoryEntry.content["coordinates"]["lon"].label('lon'),
        DirectoryEntry.meta["access"].label('access'),
    )
    # NOTE: '!= None' is intentional - SQLAlchemy renders it as
    # 'IS NOT NULL'; do not replace it with 'is not None'
    q = q.filter(DirectoryEntry.content["coordinates"]["lat"] != None)

    with_categories = request.params.get('with-categories', False)

    if with_categories:
        q = q.add_column(DirectoryEntry._keywords)

    # this could be done using a query, but that seems to be more verbose
    entries = (c for c in q if request.is_manager or (
        c.access == 'public' or not c.access
    ))

    url_prefix = request.class_link(DirectoryEntry, {
        'directory_name': self.directory.name,
        'name': ''
    })

    def as_dict(entry):
        # Build one GeoJSON Feature dict from a result row
        result = {
            'type': "Feature",
            'properties': {
                'name': entry.name,
                'title': entry.title,
                'lead': entry.lead,
                'link': url_prefix + entry.name
            },
            'geometry': {
                'coordinates': (entry.lon, entry.lat),
                'type': "Point"
            }
        }

        if with_categories:
            categories = defaultdict(list)

            for item in entry._keywords.keys():
                k, v = item.split(':', 1)
                categories[k].append(v)

            result['properties']['categories'] = categories

        return result

    return tuple(as_dict(e) for e in entries)
@OrgApp.form(
    model=ExtendedDirectoryEntryCollection,
    permission=Private,
    template='form.pt',
    form=get_directory_entry_form_class,
    name='new')
def handle_new_directory_entry(self, request, form, layout=None):
    """Create a new directory entry directly (manager-level access)."""
    if form.submitted(request):
        entry = self.directory.add_by_form(form, type='extended')

        request.success(_("Added a new directory entry"))
        return request.redirect(request.link(entry))

    if form.errors:
        # Uploaded files cannot be re-rendered after a failed submit;
        # clear them so the form displays cleanly
        for field in form.match_fields(include_classes=(UploadField, )):
            getattr(form, field).data = {}

    layout = layout or DirectoryEntryCollectionLayout(self, request)
    layout.include_code_editor()
    layout.breadcrumbs.append(Link(_("New"), '#'))
    layout.editbar_links = []

    return {
        'layout': layout,
        'title': _("New Directory Entry"),
        'form': form,
    }
@OrgApp.form(
    model=DirectoryEntry,
    permission=Private,
    template='form.pt',
    form=get_directory_entry_form_class,
    name='edit')
def handle_edit_directory_entry(self, request, form, layout=None):
    """Edit an existing directory entry (manager-level access)."""
    if form.submitted(request):
        form.populate_obj(self)

        request.success(_("Your changes were saved"))
        return request.redirect(request.link(self))
    elif not request.POST:
        # Initial GET: pre-fill the form from the entry
        form.process(obj=self)

    layout = layout or DirectoryEntryLayout(self, request)
    layout.include_code_editor()
    layout.breadcrumbs.append(Link(_("Edit"), '#'))
    layout.editbar_links = []

    return {
        'layout': layout,
        'title': self.title,
        'form': form,
    }
@OrgApp.form(model=ExtendedDirectoryEntryCollection,
             permission=Public,
             template='directory_entry_submission_form.pt',
             form=get_submission_form_class,
             name='submit')
def handle_submit_directory_entry(self, request, form, layout=None):
    """Let the public submit a new directory entry for moderation.

    The submission is stored as a pending external form submission which
    is later processed by the DIR submission handler.
    """
    title = _("Submit a New Directory Entry")

    if form.submitted(request):
        forms = FormCollection(request.session)

        # required by the form submissions collection
        form._source = self.directory.structure

        # the price per submission
        if self.directory.price == 'paid':
            amount = self.directory.price_per_submission
        else:
            amount = 0.0

        submission = forms.submissions.add_external(
            form=form,
            state='pending',
            payment_method=self.directory.payment_method,
            email=form.submitter.data,
            meta={
                'handler_code': 'DIR',
                'directory': self.directory.id.hex,
                'price': {
                    'amount': amount,
                    'currency': self.directory.currency
                },
                # The submitter extension is handled separately
                'extensions': tuple(
                    ext for ext in self.directory.extensions
                    if ext != 'submitter'
                ),
                **form.submitter_meta
            },
        )

        # remove old submission while we are at it
        self.directory.remove_old_pending_submissions()

        url = URL(request.link(submission))
        url = url.query_param('title', request.translate(title))

        return request.redirect(url.as_string())

    layout = layout or DirectoryEntryCollectionLayout(self, request)
    layout.include_code_editor()
    layout.breadcrumbs.append(Link(title, '#'))
    layout.editbar_links = []

    return {
        'directory': self.directory,
        'form': form,
        'layout': layout,
        'title': title,
        'guideline': self.directory.submissions_guideline,
        'button_text': _('Continue')
    }
@OrgApp.form(model=DirectoryEntry,
             permission=Public,
             template='directory_entry_submission_form.pt',
             form=get_change_request_form_class,
             name='change-request')
def handle_change_request(self, request, form, layout=None):
    """Let the public propose changes to an existing directory entry.

    Stored like a submission, but with the 'change-request' extension and
    a reference to the targeted entry.
    """
    title = _("Propose a change")

    if form.submitted(request):
        forms = FormCollection(request.session)

        # required by the form submissions collection
        form._source = self.directory.structure

        # The submitter extension is handled separately
        extensions = [
            ext for ext in self.directory.extensions if ext != 'submitter']
        extensions.append('change-request')

        submission = forms.submissions.add_external(
            form=form,
            state='pending',
            email=form.submitter.data,
            meta={
                'handler_code': 'DIR',
                'directory': self.directory.id.hex,
                'directory_entry': self.id.hex,
                'extensions': extensions,
                **form.submitter_meta
            }
        )

        # remove old submission while we are at it
        self.directory.remove_old_pending_submissions()

        url = URL(request.link(submission))
        url = url.query_param('title', request.translate(title))

        return request.redirect(url.as_string())
    elif not request.POST:
        # Pre-fill the form with the entry's current values
        form.process(obj=self)

    layout = layout or DirectoryEntryLayout(self, request)
    layout.include_code_editor()
    layout.breadcrumbs.append(Link(title, '#'))
    layout.editbar_links = []

    return {
        'directory': self.directory,
        'form': form,
        'layout': layout,
        'title': title,
        'hint': _(
            "To request a change, edit the fields you would like to change, "
            "leaving the other fields intact. Then submit your request."
        ),
        'guideline': self.directory.change_requests_guideline,
        'button_text': _('Continue')
    }
@OrgApp.html(
    model=DirectoryEntry,
    permission=Public,
    template='directory_entry.pt')
def view_directory_entry(self, request, layout=None):
    """Render a single directory entry."""
    return {
        'layout': layout or DirectoryEntryLayout(self, request),
        'title': self.title,
        'entry': self
    }
@OrgApp.view(
    model=DirectoryEntry,
    permission=Private,
    request_method='DELETE')
def delete_directory_entry(self, request):
    """Delete a single directory entry (manager-level access)."""
    request.assert_valid_csrf_token()

    session = request.session
    session.delete(self)

    request.success(_("The entry was deleted"))
@OrgApp.form(model=ExtendedDirectoryEntryCollection,
             permission=Public, name='export',
             template='export.pt', form=ExportForm)
def view_export(self, request, form, layout=None):
    """Render the export form for a directory collection.

    On submit, redirects to the '+zip' view with the chosen format as a
    query parameter; the zip view does the actual export work.
    """
    # the directory itself may be hidden even if the view is public
    if not request.is_visible(self.directory):
        return HTTPForbidden()

    layout = layout or DirectoryEntryCollectionLayout(self, request)
    layout.breadcrumbs.append(Link(_("Export"), '#'))
    layout.editbar_links = None

    if form.submitted(request):
        url = URL(request.link(self, '+zip'))
        url = url.query_param('format', form.format)
        return request.redirect(url.as_string())

    filters = get_filters(request, self, keyword_count(request, self),
                          view_name='+export')
    # explanation text depends on whether filters are available
    if filters:
        pretext = _("On the right side, you can filter the entries of this "
                    "directory to export.")
    else:
        pretext = _("Exports all entries of this directory.")

    return {
        'layout': layout,
        'title': _("Export"),
        'form': form,
        'explanation': f'{request.translate(pretext)} ' + request.translate(_(
            "The resulting zipfile contains the selected format as well "
            "as metadata and images/files if the directory contains any."
        )),
        'filters': filters,
        # only count entries the current user is allowed to see
        'count': len(request.exclude_invisible(self.query().all()))
    }
@OrgApp.view(model=ExtendedDirectoryEntryCollection,
permission=Public, name='zip')
def view_zip_file(self, request):
if not request.is_visible(self.directory):
return HTTPForbidden()
layout = DirectoryEntryCollectionLayout(self, request)
format = request.params.get('format', 'json')
formatter = layout.export_formatter(format)
| |
<gh_stars>1-10
from __future__ import with_statement
from pypy.interpreter.baseobjspace import W_Root, BufferInterfaceNotFound
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import TypeDef, GetSetProperty
from pypy.interpreter.error import OperationError, oefmt, wrap_windowserror
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rlib import rwinreg, rwin32
from rpython.rlib.rarithmetic import r_uint, intmask
def raiseWindowsError(space, errcode, context):
    """Raise an app-level WindowsError for the given Win32 error code.

    ``context`` names the failing API call; it is currently unused here
    but kept for signature parity with the callers' intent.
    """
    message = rwin32.FormatError(errcode)
    raise OperationError(space.w_WindowsError,
                         space.newtuple([space.newint(errcode),
                                         space.newtext(message)]))
class W_HKEY(W_Root):
    """App-level wrapper around a Win32 registry handle (HKEY).

    The underlying handle is closed automatically when the object is
    finalized; Close()/Detach() mirror CPython's PyHKEY API.
    """

    def __init__(self, space, hkey):
        self.hkey = hkey
        self.space = space
        # ensure _finalize_() runs when the object is collected
        self.register_finalizer(space)

    def _finalize_(self):
        # GC hook: closing an already-closed handle is a no-op (see Close)
        self.Close(self.space)

    def as_int(self):
        # expose the raw handle as an integer-sized value
        return rffi.cast(rffi.SIZE_T, self.hkey)

    def descr_nonzero(self, space):
        # __nonzero__: handles with an open handle are truthy
        return space.newbool(self.as_int() != 0)

    def descr_handle_get(self, space):
        # 'handle' property getter: the integer Win32 handle
        return space.newint(self.as_int())

    def descr_repr(self, space):
        return space.newtext("<PyHKEY:0x%x>" % (self.as_int(),))

    def descr_int(self, space):
        return space.newint(self.as_int())

    def descr__enter__(self, space):
        # context manager entry returns the key itself
        return self

    def descr__exit__(self, space, __args__):
        # context manager exit closes the key regardless of exceptions
        CloseKey(space, self)

    def Close(self, space):
        """key.Close() - Closes the underlying Windows handle.
        If the handle is already closed, no error is raised."""
        CloseKey(space, self)

    def Detach(self, space):
        """int = key.Detach() - Detaches the Windows handle from the handle object.
        The result is the value of the handle before it is detached. If the
        handle is already detached, this will return zero.
        After calling this function, the handle is effectively invalidated,
        but the handle is not closed. You would call this function when you
        need the underlying win32 handle to exist beyond the lifetime of the
        handle object.
        On 64 bit windows, the result of this function is a long integer"""
        key = self.as_int()
        # drop our reference so the finalizer will not close the handle
        self.hkey = rwin32.NULL_HANDLE
        return space.newint(key)
@unwrap_spec(key=int)
def new_HKEY(space, w_subtype, key):
    """__new__ for HKEYType: wrap an integer handle in a W_HKEY."""
    hkey = rffi.cast(rwinreg.HKEY, key)
    return W_HKEY(space, hkey)
descr_HKEY_new = interp2app(new_HKEY)
# App-level type definition exposing W_HKEY as _winreg.HKEYType.
W_HKEY.typedef = TypeDef(
    "_winreg.HKEYType",
    __doc__ = """\
PyHKEY Object - A Python object, representing a win32 registry key.
This object wraps a Windows HKEY object, automatically closing it when
the object is destroyed. To guarantee cleanup, you can call either
the Close() method on the PyHKEY, or the CloseKey() method.
All functions which accept a handle object also accept an integer -
however, use of the handle object is encouraged.
Functions:
Close() - Closes the underlying handle.
Detach() - Returns the integer Win32 handle, detaching it from the object
Properties:
handle - The integer Win32 handle.
Operations:
__nonzero__ - Handles with an open object return true, otherwise false.
__int__ - Converting a handle to an integer returns the Win32 handle.
__cmp__ - Handle objects are compared using the handle value.""",
    __new__ = descr_HKEY_new,
    __repr__ = interp2app(W_HKEY.descr_repr),
    __int__ = interp2app(W_HKEY.descr_int),
    __nonzero__ = interp2app(W_HKEY.descr_nonzero),
    __enter__ = interp2app(W_HKEY.descr__enter__),
    __exit__ = interp2app(W_HKEY.descr__exit__),
    handle = GetSetProperty(W_HKEY.descr_handle_get),
    Close = interp2app(W_HKEY.Close),
    Detach = interp2app(W_HKEY.Detach),
)
def hkey_w(w_hkey, space):
    """Convert an app-level object into an rwinreg.HKEY.

    Accepts a W_HKEY instance or an int/long handle value; None and any
    other type raise TypeError, matching CPython's behaviour.
    """
    if space.is_w(w_hkey, space.w_None):
        raise oefmt(space.w_TypeError,
                    "None is not a valid HKEY in this context")
    elif isinstance(w_hkey, W_HKEY):
        return w_hkey.hkey
    elif space.isinstance_w(w_hkey, space.w_int):
        return rffi.cast(rwinreg.HKEY, space.int_w(w_hkey))
    elif space.isinstance_w(w_hkey, space.w_long):
        # longs may exceed the signed int range, so read as unsigned
        return rffi.cast(rwinreg.HKEY, space.uint_w(w_hkey))
    else:
        raise oefmt(space.w_TypeError, "The object is not a PyHKEY object")
def CloseKey(space, w_hkey):
    """CloseKey(hkey) - Closes a previously opened registry key.
    The hkey argument specifies a previously opened key.
    Note that if the key is not closed using this method, it will be
    closed when the hkey object is destroyed by Python."""
    hkey = hkey_w(w_hkey, space)
    # a NULL handle means "already closed"; silently do nothing then
    if hkey:
        ret = rwinreg.RegCloseKey(hkey)
        if ret != 0:
            raiseWindowsError(space, ret, 'RegCloseKey')
    # null out the wrapper's handle so the finalizer won't close it again
    if isinstance(w_hkey, W_HKEY):
        space.interp_w(W_HKEY, w_hkey).hkey = rwin32.NULL_HANDLE
def FlushKey(space, w_hkey):
    """FlushKey(key) - Writes all the attributes of a key to the registry.
    key is an already open key, or any one of the predefined HKEY_* constants.
    It is not necessary to call RegFlushKey to change a key.
    Registry changes are flushed to disk by the registry using its lazy flusher.
    Registry changes are also flushed to disk at system shutdown.
    Unlike CloseKey(), the FlushKey() method returns only when all the data has
    been written to the registry.
    An application should only call FlushKey() if it requires absolute certainty that registry changes are on disk.
    If you don't know whether a FlushKey() call is required, it probably isn't."""
    hkey = hkey_w(w_hkey, space)
    # flushing a NULL handle is a no-op
    if hkey:
        ret = rwinreg.RegFlushKey(hkey)
        if ret != 0:
            raiseWindowsError(space, ret, 'RegFlushKey')
@unwrap_spec(subkey="text", filename="text")
def LoadKey(space, w_hkey, subkey, filename):
    """LoadKey(key, sub_key, file_name) - Creates a subkey under the specified key
    and stores registration information from a specified file into that subkey.
    key is an already open key, or any one of the predefined HKEY_* constants.
    sub_key is a string that identifies the sub_key to load
    file_name is the name of the file to load registry data from.
    This file must have been created with the SaveKey() function.
    Under the file allocation table (FAT) file system, the filename may not
    have an extension.
    A call to LoadKey() fails if the calling process does not have the
    SE_RESTORE_PRIVILEGE privilege.
    If key is a handle returned by ConnectRegistry(), then the path specified
    in fileName is relative to the remote computer.
    The docs imply key must be in the HKEY_USER or HKEY_LOCAL_MACHINE tree"""
    hkey = hkey_w(w_hkey, space)
    ret = rwinreg.RegLoadKey(hkey, subkey, filename)
    if ret != 0:
        raiseWindowsError(space, ret, 'RegLoadKey')
@unwrap_spec(filename="text")
def SaveKey(space, w_hkey, filename):
    """SaveKey(key, file_name) - Saves the specified key, and all its subkeys to the specified file.
    key is an already open key, or any one of the predefined HKEY_* constants.
    file_name is the name of the file to save registry data to.
    This file cannot already exist. If this filename includes an extension,
    it cannot be used on file allocation table (FAT) file systems by the
    LoadKey(), ReplaceKey() or RestoreKey() methods.
    If key represents a key on a remote computer, the path described by
    file_name is relative to the remote computer.
    The caller of this method must possess the SeBackupPrivilege security privilege.
    This function passes NULL for security_attributes to the API."""
    hkey = hkey_w(w_hkey, space)
    # third argument is the (NULL) security attributes pointer
    ret = rwinreg.RegSaveKey(hkey, filename, None)
    if ret != 0:
        raiseWindowsError(space, ret, 'RegSaveKey')
@unwrap_spec(typ=int, value="text")
def SetValue(space, w_hkey, w_subkey, typ, value):
    """SetValue(key, sub_key, type, value) - Associates a value with a specified key.
    key is an already open key, or any one of the predefined HKEY_* constants.
    sub_key is a string that names the subkey with which the value is associated.
    type is an integer that specifies the type of the data. Currently this
    must be REG_SZ, meaning only strings are supported.
    value is a string that specifies the new value.
    If the key specified by the sub_key parameter does not exist, the SetValue
    function creates it.
    Value lengths are limited by available memory. Long values (more than
    2048 bytes) should be stored as files with the filenames stored in
    the configuration registry. This helps the registry perform efficiently.
    The key identified by the key parameter must have been opened with
    KEY_SET_VALUE access."""
    if typ != rwinreg.REG_SZ:
        raise oefmt(space.w_ValueError, "Type must be _winreg.REG_SZ")
    hkey = hkey_w(w_hkey, space)
    if space.is_w(w_subkey, space.w_None):
        subkey = None
    else:
        subkey = space.text_w(w_subkey)
    with rffi.scoped_str2charp(value) as dataptr:
        # cbData must include the terminating NUL character (per the Win32
        # RegSetValue contract); passing bare len(value) stored the string
        # without its terminator.
        ret = rwinreg.RegSetValue(hkey, subkey, rwinreg.REG_SZ, dataptr,
                                  len(value) + 1)
        if ret != 0:
            raiseWindowsError(space, ret, 'RegSetValue')
def QueryValue(space, w_hkey, w_subkey):
    """string = QueryValue(key, sub_key) - retrieves the unnamed value for a key.
    key is an already open key, or any one of the predefined HKEY_* constants.
    sub_key is a string that holds the name of the subkey with which the value
    is associated. If this parameter is None or empty, the function retrieves
    the value set by the SetValue() method for the key identified by key.
    Values in the registry have name, type, and data components. This method
    retrieves the data for a key's first value that has a NULL name.
    But the underlying API call doesn't return the type, Lame Lame Lame, DONT USE THIS!!!"""
    hkey = hkey_w(w_hkey, space)
    if space.is_w(w_subkey, space.w_None):
        subkey = None
    else:
        subkey = space.text_w(w_subkey)
    # First call with a NULL buffer just to learn the required size.
    with lltype.scoped_alloc(rwin32.PLONG.TO, 1) as bufsize_p:
        ret = rwinreg.RegQueryValue(hkey, subkey, None, bufsize_p)
        bufSize = intmask(bufsize_p[0])
        if ret == rwinreg.ERROR_MORE_DATA:
            # size probe failed; fall back to a guess and grow below
            bufSize = 256
        elif ret != 0:
            raiseWindowsError(space, ret, 'RegQueryValue')

        # Retry with a doubling buffer until the value fits.
        while True:
            with lltype.scoped_alloc(rffi.CCHARP.TO, bufSize) as buf:
                ret = rwinreg.RegQueryValue(hkey, subkey, buf, bufsize_p)
                if ret == rwinreg.ERROR_MORE_DATA:
                    # Resize and retry
                    bufSize *= 2
                    bufsize_p[0] = bufSize
                    continue

                if ret != 0:
                    raiseWindowsError(space, ret, 'RegQueryValue')
                # bufsize_p now holds the data size incl. the trailing NUL;
                # strip the NUL when building the app-level string
                length = intmask(bufsize_p[0] - 1)
                return space.newtext(rffi.charp2strn(buf, length))
def convert_to_regdata(space, w_value, typ):
buf = None
if typ == rwinreg.REG_DWORD:
if space.is_none(w_value) or (
space.isinstance_w(w_value, space.w_int) or
space.isinstance_w(w_value, space.w_long)):
if space.is_none(w_value):
value = r_uint(0)
else:
value = space.c_uint_w(w_value)
buflen = rffi.sizeof(rwin32.DWORD)
buf1 = lltype.malloc(rffi.CArray(rwin32.DWORD), 1, flavor='raw')
buf1[0] = value
buf = rffi.cast(rffi.CCHARP, buf1)
elif typ == rwinreg.REG_SZ or typ == rwinreg.REG_EXPAND_SZ:
if space.is_w(w_value, space.w_None):
buflen = 1
buf = lltype.malloc(rffi.CCHARP.TO, buflen, flavor='raw')
buf[0] = '\0'
else:
if space.isinstance_w(w_value, space.w_unicode):
w_value = space.call_method(w_value, 'encode',
space.newtext('mbcs'))
buf = rffi.str2charp(space.text_w(w_value))
| |
<reponame>VonAlphaBisZulu/mcs
from straindesign.strainDesignSolution import SDSolution
import numpy as np
from scipy import sparse
import time
from cobra import Model
from typing import Dict, List, Tuple
from straindesign import StrainDesignMILPBuilder, MILP_LP, SDModule
from straindesign.names import *
from warnings import warn
class StrainDesignMILP(StrainDesignMILPBuilder):
    def __init__(self, model: Model, sd_modules: List[SDModule], **kwargs):
        """Construct the strain design MILP from a model and design modules.

        Keyword arguments listed in ``keys`` (currently only 'options')
        are stored as attributes on this object; everything else is
        forwarded to the StrainDesignMILPBuilder base class.
        """
        keys = {'options'}
        # remove keys that are irrelevant for MILP construction
        kwargs1 = kwargs.copy()
        for k in keys:
            if k in kwargs1:
                del kwargs1[k]
        super().__init__(model, sd_modules, **kwargs1)
        # set keys passed in kwargs
        for key, value in dict(kwargs).items():
            if key in keys:
                setattr(self, key, value)
        # set all remaining keys to None
        for key in keys:
            if key not in dict(kwargs).keys():
                setattr(self, key, None)
        self.sd_modules = sd_modules
        # wrap the matrices assembled by the builder in a solver-backed MILP
        self.milp = MILP_LP(c=self.c,
                            A_ineq=self.A_ineq,
                            b_ineq=self.b_ineq,
                            A_eq=self.A_eq,
                            b_eq=self.b_eq,
                            lb=self.lb,
                            ub=self.ub,
                            vtype=self.vtype,
                            indic_constr=self.indic_constr,
                            M=self.M,
                            solver=self.solver)
def add_exclusion_constraints(self,z):
for i in range(z.shape[0]):
A_ineq = z[i].copy()
A_ineq.resize((1,self.milp.A_ineq.shape[1]))
b_ineq = np.sum(z[i])-1
self.A_ineq = sparse.vstack((self.A_ineq,A_ineq))
self.b_ineq += [b_ineq]
self.milp.add_ineq_constraints(A_ineq,[b_ineq])
def add_exclusion_constraints_ineq(self,z):
for j in range(z.shape[0]):
A_ineq = [1.0 if z[j,i] else -1.0 for i in self.idx_z]
A_ineq.resize((1,self.milp.A_ineq.shape[1]))
b_ineq = np.sum(z[j])-1
self.A_ineq = sparse.vstack((self.A_ineq,A_ineq))
self.b_ineq += [b_ineq]
self.milp.add_ineq_constraints(A_ineq,[b_ineq])
def sd2dict(self,sol,*args) -> Dict:
output = {}
reacID = self.model.reactions.list_attr("id")
for i in self.idx_z:
if sol[0,i] != 0 and not np.isnan(sol[0,i]):
if self.z_inverted[i]:
output[reacID[i]] = sol[0,i]
else:
output[reacID[i]] = -sol[0,i]
elif args and args[0] and (sol[0,i] == 0) and self.z_inverted[i]:
output[reacID[i]] = 0.0
return output
def solveZ(self) -> Tuple[List,int]:
x, _ , status = self.milp.solve()
z = sparse.csr_matrix([x[i] for i in self.idx_z])
return z, status
def populateZ(self,n) -> Tuple[List,int]:
x, _ , status = self.milp.populate(n)
z = sparse.csr_matrix([[x[j][i] for i in self.idx_z] for j in range(len(x))])
z.resize((len(x),self.num_z))
return z, status
def clearObjective(self):
self.milp.clear_objective()
self.c = [0]*len(self.c)
def fixObjective(self,c,cx):
self.milp.set_ineq_constraint(2,c,cx)
self.A_ineq = self.A_ineq.tolil()
self.A_ineq[2] = sparse.lil_matrix(c)
self.A_ineq = self.A_ineq.tocsr()
self.b_ineq[2] = cx
def resetObjective(self):
for i,v in enumerate(self.c_bu):
self.c[i] = v
self.milp.set_objective_idx([[i,v] for i,v in enumerate(self.c_bu)])
def setMinIntvCostObjective(self):
self.clearObjective()
for i in self.idx_z:
if i not in self.z_non_targetable:
self.c[i] = self.cost[i]
self.milp.set_objective_idx([[i,self.c[i]] for i in self.idx_z if i not in self.z_non_targetable])
def resetTargetableZ(self):
self.ub = [1 - float(i) for i in self.z_non_targetable]
self.milp.set_ub([[i,1] for i in self.idx_z if not self.z_non_targetable[i]])
def setTargetableZ(self,sol):
self.ub = [1.0 if sol[0,i] else 0.0 for i in self.idx_z]
self.milp.set_ub([[i,0] for i in self.idx_z if not sol[0,i]])
    def verify_sd(self, sols) -> List:
        """Verify candidate strain designs by solving the reduced LP.

        For every solution row, variables/constraints that the z-mapping
        marks as switched off by that intervention set are removed, and
        the remaining continuous LP is solved; feasibility (a non-NaN
        objective) means the design is valid.

        :return: list of booleans, one per row of *sols*
        """
        valid = [False]*sols.shape[0]
        for i, sol in zip(range(sols.shape[0]), sols):
            # variables disabled by this intervention set (sense decides
            # whether a set/unset z turns the variable off)
            inactive_vars = [var for z_i, var, sense in \
                zip(self.cont_MILP.z_map_vars.row, self.cont_MILP.z_map_vars.col, self.cont_MILP.z_map_vars.data)\
                if np.logical_xor(sol[0, z_i], sense == -1)]
            active_vars = [i for i in range(self.cont_MILP.z_map_vars.shape[1]) if i not in inactive_vars]
            # same filtering for inequality constraints ...
            inactive_ineqs = [ineq for z_i, ineq, sense in \
                zip(self.cont_MILP.z_map_constr_ineq.row, self.cont_MILP.z_map_constr_ineq.col, self.cont_MILP.z_map_constr_ineq.data)\
                if np.logical_xor(sol[0, z_i], sense == -1)]
            active_ineqs = [i for i in range(self.cont_MILP.z_map_constr_ineq.shape[1]) if i not in inactive_ineqs]
            # ... and for equality constraints
            inactive_eqs = [eq for z_i, eq, sense in \
                zip(self.cont_MILP.z_map_constr_eq.row, self.cont_MILP.z_map_constr_eq.col, self.cont_MILP.z_map_constr_eq.data)\
                if np.logical_xor(sol[0, z_i], sense == -1)]
            active_eqs = [i for i in range(self.cont_MILP.z_map_constr_eq.shape[1]) if i not in inactive_eqs]
            # build and solve the reduced continuous problem
            lp = MILP_LP(A_ineq=self.cont_MILP.A_ineq[active_ineqs, :][:, active_vars],
                         b_ineq=[self.cont_MILP.b_ineq[i] for i in active_ineqs],
                         A_eq=self.cont_MILP.A_eq[active_eqs, :][:, active_vars],
                         b_eq=[self.cont_MILP.b_eq[i] for i in active_eqs],
                         lb=[self.cont_MILP.lb[i] for i in active_vars],
                         ub=[self.cont_MILP.ub[i] for i in active_vars],
                         solver=self.solver)
            # NaN objective signals infeasibility of the reduced LP
            valid[i] = not np.isnan(lp.slim_solve())
        return valid
# Find iteratively smallest solutions
def compute_optimal(self, **kwargs):
keys = {'max_solutions','time_limit','show_no_ki'}
# set keys passed in kwargs
for key,value in dict(kwargs).items():
if key in keys:
setattr(self,key,value)
# set all remaining keys to None
for key in keys:
if key not in dict(kwargs).keys():
setattr(self,key,None)
if self.max_solutions is None:
self.max_solutions = np.inf
if self.time_limit is None:
self.time_limit = np.inf
# first check if strain doesn't already fulfill the strain design setup
if self.is_mcs_computation and self.verify_sd(sparse.csr_matrix((1,self.num_z)))[0]:
print('The strain already meets the requirements defined in the strain design setup. ' \
'No interventions are needed.')
return self.build_sd_solution([{}], OPTIMAL, SMALLEST)
# otherwise continue
endtime = time.time() + self.time_limit
status = OPTIMAL
sols = sparse.csr_matrix((0,self.num_z))
print('Finding optimal strain designs ...')
while sols.shape[0] < self.max_solutions and \
status == OPTIMAL and \
endtime-time.time() > 0:
self.milp.set_time_limit(endtime-time.time())
self.resetTargetableZ()
self.resetObjective()
self.fixObjective(self.c_bu,np.inf)
x, min_cx , status = self.milp.solve()
z = sparse.csr_matrix([x[i] for i in self.idx_z])
if np.isnan(z[0,0]):
break
output = self.sd2dict(z)
if self.is_mcs_computation:
if status in [OPTIMAL,TIME_LIMIT_W_SOL] and all(self.verify_sd(z)):
print('Strain design with cost '+str(round((z*self.cost)[0],6))+': '+str(output))
self.add_exclusion_constraints(z)
sols = sparse.vstack((sols,z))
elif status in [OPTIMAL,TIME_LIMIT_W_SOL]:
print('Invalid (minimal) solution found: '+ str(output))
self.add_exclusion_constraints(z)
if status != OPTIMAL:
break
else:
# Verify solution and explore subspace to get minimal intervention sets
print('Found solution with objective value '+str(min_cx))
print('Minimizing number of interventions in subspace with '+str(sum(z.toarray()[0]))+' possible targets.')
self.fixObjective(self.c_bu,min_cx)
self.setMinIntvCostObjective()
self.setTargetableZ(z)
while sols.shape[0] < self.max_solutions and \
status == OPTIMAL and \
endtime-time.time() > 0:
self.milp.set_time_limit(endtime-time.time())
z1, status1 = self.solveZ()
output = self.sd2dict(z1)
if status1 in [OPTIMAL,TIME_LIMIT_W_SOL] and all(self.verify_sd(z1)):
print('Strain design with cost '+str(round((z1*self.cost)[0],6))+': '+str(output))
self.add_exclusion_constraints(z1)
sols = sparse.vstack((sols,z1))
elif status1 in [OPTIMAL,TIME_LIMIT_W_SOL]:
print('Invalid minimal solution found: '+ str(output))
self.add_exclusion_constraints(z)
else: # return to outside loop
break
if status == INFEASIBLE and sols.shape[0] > 0: # all solutions found
status = OPTIMAL
if status == TIME_LIMIT and sols.shape[0] > 0: # some solutions found, timelimit reached
status = TIME_LIMIT_W_SOL
if endtime-time.time() > 0 and sols.shape[0] > 0:
print('Finished solving strain design MILP. ')
if 'strainDesignMILP' in self.__module__:
print(str(sols.shape[0]) +' solutions found.')
elif endtime-time.time() > 0:
print('Finished solving strain design MILP.')
if 'strainDesignMILP' in self.__module__:
print(' No solutions exist.')
else:
print('Time limit reached.')
# Translate solutions into dict
m=sd_dict = []
for sol in sols:
sd_dict += [self.sd2dict(sol,self.show_no_ki)]
return self.build_sd_solution(sd_dict, status, SMALLEST)
# Find iteratively intervention sets of arbitrary size or quality
# output format: list of 'dict' (default) or 'sparse'
def compute(self, **kwargs):
keys = {'max_solutions','time_limit','show_no_ki'}
# set keys passed in kwargs
for key,value in kwargs.items():
if key in keys:
setattr(self,key,value)
# set all remaining keys to None
for key in keys:
if key not in kwargs.keys():
setattr(self,key,None)
if self.max_solutions is None:
self.max_solutions = np.inf
if self.time_limit is None:
self.time_limit = np.inf
# first check if strain doesn't already fulfill the strain design setup
if self.verify_sd(sparse.csr_matrix((1,self.num_z)))[0]:
print('The strain already meets the requirements defined in the strain design setup. ' \
'No interventions are needed.')
return self.build_sd_solution([{}], OPTIMAL, ANY)
# otherwise continue
endtime = time.time() + self.time_limit
status = OPTIMAL
sols = sparse.csr_matrix((0,self.num_z))
print('Finding (also non-optimal) strain designs ...')
while sols.shape[0] < self.max_solutions and \
status == OPTIMAL and \
endtime-time.time() > 0:
print('Searching in full search space.')
self.milp.set_time_limit(endtime-time.time())
self.resetTargetableZ()
self.clearObjective()
self.fixObjective(self.c_bu,np.inf) # keep objective open
x, min_cx , status = self.milp.solve()
z = sparse.csr_matrix([x[i] for i in self.idx_z])
if np.isnan(z[0,0]):
break
if not all(self.verify_sd(z)):
self.milp.set_time_limit(endtime-time.time())
self.resetObjective()
self.setTargetableZ(z)
self.fixObjective(self.c_bu,np.sum([c*x for c,x in zip(self.c_bu,x)]))
z1, status1 = self.solveZ()
if status1 == OPTIMAL and not self.verify_sd(z1):
self.add_exclusion_constraints(z1)
output = self.sd2dict(z1)
print('Invalid minimal solution found: '+ str(output))
continue
if status1 != OPTIMAL and not self.verify_sd(z1):
self.add_exclusion_constraints_ineq(z);
output = self.sd2dict(z)
print('Invalid minimal solution found: '+ str(output))
continue
else:
output = self.sd2dict(z)
print('Warning: Solver first found the infeasible solution: '+ str(output))
output = self.sd2dict(z1)
print('But a subset of this solution seems to be valid: '+ str(output))
# Verify solution and explore subspace to get strain designs
cx = np.sum([c*x for c,x in zip(self.c_bu,x)])
if not self.is_mcs_computation:
print('Found solution with objective value '+str(cx))
print('Minimizing number of interventions in subspace with '+str(sum(z.toarray()[0]))+' possible targets.')
self.setMinIntvCostObjective()
self.setTargetableZ(z)
self.fixObjective(self.c_bu,cx)
while sols.shape[0] < self.max_solutions and \
status == OPTIMAL and \
endtime-time.time() > 0:
self.milp.set_time_limit(endtime-time.time())
x1, min_cx , status1 = self.milp.solve()
z1 = sparse.csr_matrix([x1[i] for i in self.idx_z])
output = self.sd2dict(z1)
if status1 in [OPTIMAL,TIME_LIMIT_W_SOL] and all(self.verify_sd(z1)):
print('Strain design with cost '+str(round((z1*self.cost)[0],6))+': '+str(output))
self.add_exclusion_constraints(z1)
sols = sparse.vstack((sols,z1))
elif status1 in [OPTIMAL,TIME_LIMIT_W_SOL]:
print('Invalid minimal solution found: '+ str(output))
self.add_exclusion_constraints(z)
else: # return to outside loop
break
if status == INFEASIBLE and sols.shape[0] > 0: # all solutions found
status = OPTIMAL
if status == TIME_LIMIT and sols.shape[0] > 0: # some solutions found, timelimit reached
status = TIME_LIMIT_W_SOL
if endtime-time.time() > 0 and sols.shape[0] > 0:
print('Finished solving strain design MILP. ')
if 'strainDesignMILP' in self.__module__:
print(str(sols.shape[0]) +' solutions found.')
elif endtime-time.time() > 0:
print('Finished solving strain design MILP.')
if 'strainDesignMILP' in self.__module__:
print(' No solutions | |
"""A collection of native images.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import errno
import glob
import io
import logging
import os
import shutil
import stat
from treadmill import appcfg
from treadmill import cgroups
from treadmill import exc
from treadmill import fs
from treadmill import keytabs
from treadmill import runtime
from treadmill import subproc
from treadmill import supervisor
from treadmill import utils
from treadmill.appcfg import abort as app_abort
from treadmill.fs import linux as fs_linux
from . import fs as image_fs
from . import _image_base
from . import _repository_base
from .. import _manifest
_LOGGER = logging.getLogger(__name__)
_CONTAINER_ENV_DIR = 'env'
_CONTAINER_DOCKER_ENV_DIR = os.path.join('docker', 'env')
_CONTAINER_DOCKER_ETC_DIR = os.path.join('docker', 'etc')
def create_docker_environ_dir(container_dir, root_dir, app):
    """Creates environ dir for docker.

    Writes the docker envdir under the container directory and bind
    mounts it read-only into the container volume.
    """
    env_dir = os.path.join(container_dir, _CONTAINER_DOCKER_ENV_DIR)
    env = {}
    treadmill_bind_preload_so = os.path.basename(
        subproc.resolve('treadmill_bind_preload.so')
    )
    # only preload the bind library when ephemeral ports are in use;
    # '$LIB' is expanded by the dynamic loader (32/64 bit)
    if app.ephemeral_ports.tcp or app.ephemeral_ports.udp:
        env['LD_PRELOAD'] = os.path.join(
            _manifest.TREADMILL_BIND_PATH,
            '$LIB',
            treadmill_bind_preload_so
        )

    supervisor.create_environ_dir(env_dir, env)

    # Bind the environ directory in the container volume
    fs.mkdir_safe(os.path.join(root_dir, _CONTAINER_DOCKER_ENV_DIR))
    fs_linux.mount_bind(
        root_dir, os.path.join(os.sep, _CONTAINER_DOCKER_ENV_DIR),
        source=os.path.join(container_dir, _CONTAINER_DOCKER_ENV_DIR),
        recursive=False, read_only=True
    )
def create_environ_dir(container_dir, root_dir, app):
    """Creates environ dir for s6-envdir."""
    env_dir = os.path.join(container_dir, _CONTAINER_ENV_DIR)

    # Static per-app variables.
    env = {
        'TREADMILL_APP': app.app,
        'TREADMILL_CELL': app.cell,
        'TREADMILL_CPU': app.cpu,
        'TREADMILL_DISK': app.disk,
        'TREADMILL_HOST_IP': app.network.external_ip,
        'TREADMILL_IDENTITY': app.identity,
        'TREADMILL_IDENTITY_GROUP': app.identity_group,
        'TREADMILL_INSTANCEID': app.task,
        'TREADMILL_MEMORY': app.memory,
        'TREADMILL_PROID': app.proid,
        'TREADMILL_ENV': app.environment,
    }

    # One variable per named endpoint, holding its real (host) port.
    for endpoint in app.endpoints:
        env['TREADMILL_ENDPOINT_' + endpoint.name.upper()] = endpoint.real_port

    # Space separated lists of ephemeral ports.
    env['TREADMILL_EPHEMERAL_TCP_PORTS'] = ' '.join(
        str(port) for port in app.ephemeral_ports.tcp
    )
    env['TREADMILL_EPHEMERAL_UDP_PORTS'] = ' '.join(
        str(port) for port in app.ephemeral_ports.udp
    )

    env['TREADMILL_CONTAINER_IP'] = app.network.vip
    env['TREADMILL_GATEWAY_IP'] = app.network.gateway
    if app.shared_ip:
        env['TREADMILL_SERVICE_IP'] = app.network.external_ip

    supervisor.create_environ_dir(env_dir, env)

    # Bind the environ directory in the container volume
    fs.mkdir_safe(os.path.join(root_dir, _CONTAINER_ENV_DIR))
    fs_linux.mount_bind(
        root_dir, os.path.join(os.sep, _CONTAINER_ENV_DIR),
        source=os.path.join(container_dir, _CONTAINER_ENV_DIR),
        recursive=False, read_only=True
    )

    if hasattr(app, 'docker') and app.docker:
        create_docker_environ_dir(container_dir, root_dir, app)
def create_supervision_tree(tm_env, container_dir, root_dir, app,
                            cgroups_path):
    """Creates s6 supervision tree.

    Builds two scan directories under the container directory — 'sys'
    for system services and 'services' for app services — then bind
    mounts the services directory and the host control sockets into the
    container volume.
    """
    uniq_name = appcfg.app_unique_name(app)
    ctl_uds = os.path.join(os.sep, 'run', 'tm_ctl')
    tombstone_ctl_uds = os.path.join(ctl_uds, 'tombstone')

    sys_dir = os.path.join(container_dir, 'sys')

    # Clean up system services that are no longer in the manifest.
    try:
        old_system_services = [
            svc_name for svc_name in os.listdir(sys_dir)
            if (not svc_name.startswith('.') and
                os.path.isdir(os.path.join(sys_dir, svc_name)))
        ]
    except FileNotFoundError:
        old_system_services = []

    new_system_services = [svc_def.name for svc_def in app.system_services]

    for svc_name in set(old_system_services) - set(new_system_services):
        _LOGGER.info('Removing old system service: %s', svc_name)
        fs.rmtree_safe(os.path.join(sys_dir, svc_name))

    sys_scandir = supervisor.create_scan_dir(
        sys_dir,
        finish_timeout=6000,
        wait_cgroups=cgroups_path,
    )
    for svc_def in app.system_services:
        # system service tombstones go to a host directory (not a UDS)
        if svc_def.restart is not None:
            monitor_policy = {
                'limit': svc_def.restart.limit,
                'interval': svc_def.restart.interval,
                'tombstone': {
                    'uds': False,
                    'path': tm_env.services_tombstone_dir,
                    'id': '{},{}'.format(uniq_name, svc_def.name)
                }
            }
        else:
            monitor_policy = None

        supervisor.create_service(
            sys_scandir,
            name=svc_def.name,
            app_run_script=svc_def.command,
            userid='root',
            environ_dir=os.path.join(container_dir, _CONTAINER_ENV_DIR),
            environ={
                envvar.name: envvar.value
                for envvar in svc_def.environ
            },
            environment=app.environment,
            downed=svc_def.downed,
            trace=None,
            monitor_policy=monitor_policy
        )
    sys_scandir.write()

    services_dir = os.path.join(container_dir, 'services')
    services_scandir = supervisor.create_scan_dir(
        services_dir,
        finish_timeout=5000
    )
    for svc_def in app.services:
        # app service tombstones are reported through the control UDS
        if svc_def.restart is not None:
            monitor_policy = {
                'limit': svc_def.restart.limit,
                'interval': svc_def.restart.interval,
                'tombstone': {
                    'uds': True,
                    'path': tombstone_ctl_uds,
                    'id': '{},{}'.format(uniq_name, svc_def.name)
                }
            }
        else:
            monitor_policy = None

        if svc_def.trace is not None:
            trace = {
                'instanceid': app.name,
                'uniqueid': app.uniqueid,
                'service': svc_def.name,
                'path': os.path.join(ctl_uds, 'appevents')
            }
        else:
            trace = None

        # services may override the default run logger
        logger_template = getattr(svc_def, 'logger', 's6.app-logger.run')
        _LOGGER.info('Using logger: %s', logger_template)

        supervisor.create_service(
            services_scandir,
            name=svc_def.name,
            app_run_script=svc_def.command,
            userid=svc_def.proid,
            environ_dir='/' + _CONTAINER_ENV_DIR,
            environ={
                envvar.name: envvar.value
                for envvar in svc_def.environ
            },
            environment=app.environment,
            downed=svc_def.downed,
            trace=trace if svc_def.trace else None,
            log_run_script=logger_template,
            monitor_policy=monitor_policy
        )
    services_scandir.write()

    # Bind the service directory in the container volume
    fs.mkdir_safe(os.path.join(root_dir, 'services'))
    fs_linux.mount_bind(
        root_dir, os.path.join(os.sep, 'services'),
        source=os.path.join(container_dir, 'services'),
        recursive=False, read_only=False
    )

    # Bind the ctrl directory in the container volume which has all the
    # unix domain sockets to communicate outside the container to treadmill
    fs.mkdir_safe(os.path.join(root_dir, 'run', 'tm_ctl'))
    fs_linux.mount_bind(
        root_dir, os.path.join(os.sep, 'run', 'tm_ctl'),
        source=tm_env.ctl_dir,
        recursive=False, read_only=False
    )
def make_dev(newroot_norm):
    """Make /dev.

    Mounts a private tmpfs on /dev inside the new root, creates the
    standard character devices, symlinks, and the shm/pts/mqueue
    submounts.
    """
    fs_linux.mount_tmpfs(
        newroot_norm, '/dev',
        nodev=False, noexec=False, nosuid=True, relatime=False,
        mode='0755'
    )

    # (path, mode, major, minor) for the standard character devices
    devices = [
        ('/dev/null', 0o666, 1, 3),
        ('/dev/zero', 0o666, 1, 5),
        ('/dev/full', 0o666, 1, 7),
        ('/dev/tty', 0o666, 5, 0),
        ('/dev/random', 0o444, 1, 8),
        ('/dev/urandom', 0o444, 1, 9),
    ]
    # clear the umask so the device modes are created exactly as listed
    prev_umask = os.umask(0000)
    for device, permissions, major, minor in devices:
        os.mknod(
            newroot_norm + device,
            permissions | stat.S_IFCHR,
            os.makedev(major, minor)
        )
    os.umask(prev_umask)
    # mirror the host's /dev/tty ownership (uid/group, typically 'tty')
    st = os.stat('/dev/tty')
    os.chown(newroot_norm + '/dev/tty', st.st_uid, st.st_gid)

    symlinks = [
        ('/dev/fd', '/proc/self/fd'),
        ('/dev/stdin', '/proc/self/fd/0'),
        ('/dev/stdout', '/proc/self/fd/1'),
        ('/dev/stderr', '/proc/self/fd/2'),
        ('/dev/core', '/proc/kcore'),
    ]
    for link, target in symlinks:
        fs.symlink_safe(newroot_norm + link, target)

    for directory in ['/dev/shm', '/dev/pts', '/dev/mqueue']:
        fs.mkdir_safe(newroot_norm + directory)
    fs_linux.mount_tmpfs(
        newroot_norm, '/dev/shm',
        nodev=True, noexec=False, nosuid=True, relatime=False
    )
    fs_linux.mount_devpts(
        newroot_norm, '/dev/pts',
        gid=st.st_gid, mode='0620', ptmxmode='0666'
    )
    fs.symlink_safe(newroot_norm + '/dev/ptmx', 'pts/ptmx')
    fs_linux.mount_mqueue(newroot_norm, '/dev/mqueue')
    # Passthrough container log to host system logger.
    fs_linux.mount_bind(newroot_norm, '/dev/log', read_only=False)
def make_fsroot(root_dir, app):
    """Initializes directory structure for the container in a new root.

    The container uses pretty much a blank FHS 3 layout.

     - Bind directories in parent / (with exceptions - see below.)
     - Skip /tmp, create /tmp in the new root with correct permissions.
     - Selectively create / bind /var.
       - /var/tmp (new)
       - /var/log (new)
       - /var/spool - create empty with dirs.
     - Bind everything in /var, skipping /spool/tickets

    NOTE(review): an earlier docstring said "tm_env is used to deliver abort
    events", but this function takes no ``tm_env``; aborts are delivered by
    raising exc.ContainerSetupError below. Confirm against callers.

    :param root_dir: Path to the container's new root.
    :param app: Application manifest; only its optional ``docker`` flag is
        consulted here.
    :raises exc.ContainerSetupError: when the docker tmpfs mount fails.
    """
    newroot_norm = fs.norm_safe(root_dir)
    # Directories created empty inside the new root.
    emptydirs = [
        '/bin',
        '/dev',
        '/etc',
        '/home',
        '/lib',
        '/lib64',
        '/opt',
        '/proc',
        '/root',
        '/run',
        '/sbin',
        '/sys',
        '/tmp',
        '/usr',
        '/var/cache',
        '/var/empty',
        '/var/empty/sshd',
        '/var/lib',
        '/var/lock',
        '/var/log',
        '/var/opt',
        '/var/spool',
        '/var/tmp',
        '/var/spool/keytabs',
        '/var/spool/tickets',
        '/var/spool/tokens',
        # for SSS
        '/var/lib/sss',
    ]
    # Directories made world-writable with the sticky bit, like /tmp on a
    # normal host.
    stickydirs = [
        '/opt',
        '/run',
        '/tmp',
        '/var/cache',
        '/var/lib',
        '/var/lock',
        '/var/log',
        '/var/opt',
        '/var/tmp',
        '/var/spool/keytabs',
        '/var/spool/tickets',
        '/var/spool/tokens',
    ]
    # these folders are shared with underlying host and other containers,
    mounts = [
        '/bin',
        '/etc',  # TODO: Add /etc/opt
        '/lib',
        '/lib64',
        '/root',
        '/sbin',
        '/usr',
        # for SSS
        '/var/lib/sss',
        # TODO: Remove below once PAM UDS is implemented
        os.path.expandvars('${TREADMILL_APPROOT}/env'),
        os.path.expandvars('${TREADMILL_APPROOT}/spool'),
    ]
    for directory in emptydirs:
        fs.mkdir_safe(newroot_norm + directory)
    for directory in stickydirs:
        os.chmod(newroot_norm + directory, 0o777 | stat.S_ISVTX)
    # /var/empty must be owned by root and not group or world-writable.
    os.chmod(os.path.join(newroot_norm, 'var/empty'), 0o711)
    # /sys is bound read-write from the host.
    fs_linux.mount_bind(
        newroot_norm, os.path.join(os.sep, 'sys'),
        source='/sys',
        recursive=True, read_only=False
    )
    # Populate /dev (tmpfs + device nodes + pts/shm/mqueue mounts).
    make_dev(newroot_norm)
    # Per FHS3 /var/run should be a symlink to /run which should be tmpfs
    fs.symlink_safe(
        os.path.join(newroot_norm, 'var', 'run'),
        '/run'
    )
    # We create an unbounded tmpfs mount so that runtime data can be written to
    # it, counting against the memory limit of the container.
    fs_linux.mount_tmpfs(newroot_norm, '/run')
    # Make shared directories/files readonly to container
    for mount in mounts:
        if os.path.exists(mount):
            fs_linux.mount_bind(
                newroot_norm, mount,
                recursive=True, read_only=True
            )
    if hasattr(app, 'docker') and app.docker:
        # If unable to mount docker directory, we throw Aborted events
        try:
            _mount_docker_tmpfs(newroot_norm)
        except FileNotFoundError as err:
            _LOGGER.error('Failed to mount docker tmpfs: %s', err)
            # this exception is caught by sproc run to generate abort event
            raise exc.ContainerSetupError(
                msg=str(err),
                reason=app_abort.AbortedReason.UNSUPPORTED,
            )
def _mount_docker_tmpfs(newroot_norm):
    """Mount tmpfs for docker.

    :param newroot_norm: Normalized path to the container's new root.
    """
    # /etc/docker must be writable because dockerd creates
    # /etc/docker/key.json at startup, so mount a tmpfs over it.
    fs_linux.mount_tmpfs(newroot_norm, '/etc/docker')
def create_overlay(tm_env, container_dir, root_dir, app):
    """Create overlay configuration files for the container.

    Runs the individual ``_prepare_*`` helpers (which write under
    ``<container_dir>/overlay``), then ``_bind_overlay``.

    :param tm_env: Treadmill environment (provides root and spool dirs).
    :param container_dir: Path to the container directory.
    :param root_dir: Path to the container's new root.
    :param app: Application manifest object.
    """
    # ldpreloads
    _prepare_ldpreload(container_dir, app)
    # hosts
    _prepare_hosts(container_dir, app)
    # resolv.conf
    _prepare_resolv_conf(tm_env, container_dir)
    # sshd PAM configuration
    _prepare_pam_sshd(tm_env, container_dir, app)
    # constructed keytab.
    _prepare_krb(tm_env, container_dir, root_dir, app)
    # bind prepared inside container
    _bind_overlay(container_dir, root_dir)
    if hasattr(app, 'docker') and app.docker:
        _bind_overlay_docker(container_dir, root_dir)
def _prepare_krb(tm_env, container_dir, root_dir, app):
    """Manage kerberos environment inside container.

    Builds the host ``krb5.keytab`` in the overlay etc directory, then one
    keytab per ``app.keytabs`` entry under ``var/spool/keytabs``. Entries
    are either ``princ`` or ``owner:princ``.
    """
    overlay_etc = os.path.join(container_dir, 'overlay', 'etc')
    fs.mkdir_safe(overlay_etc)
    host_sources = glob.glob(
        os.path.join(tm_env.spool_dir, 'keytabs', 'host#*')
    )
    keytabs.make_keytab(
        os.path.join(overlay_etc, 'krb5.keytab'),
        host_sources
    )
    for kt_spec in app.keytabs:
        # "owner:princ" or just "princ" (owner defaults to the principal).
        owner, sep, princ = kt_spec.partition(':')
        if not sep:
            princ = owner
        spec_dest = os.path.join(root_dir, 'var', 'spool', 'keytabs', owner)
        spec_sources = glob.glob(
            os.path.join(tm_env.spool_dir, 'keytabs', '%s#*' % princ)
        )
        keytabs.make_keytab(spec_dest, spec_sources, owner)
def _prepare_ldpreload(container_dir, app):
    """Add mandatory ldpreloads to the container environment.

    Copies the host's ``/etc/ld.so.preload`` into the overlay (if it
    exists) and appends the treadmill bind preload when the app uses
    ephemeral TCP or UDP ports.

    :param container_dir: Path to the container directory.
    :param app: Application manifest object.
    """
    etc_dir = os.path.join(container_dir, 'overlay', 'etc')
    fs.mkdir_safe(etc_dir)
    new_ldpreload = os.path.join(etc_dir, 'ld.so.preload')
    try:
        shutil.copyfile('/etc/ld.so.preload', new_ldpreload)
    except FileNotFoundError:
        # Host has no ld.so.preload; start from an empty overlay copy.
        # (Replaces the py2-era `except IOError` + errno.ENOENT check --
        # the file already uses py3 FileNotFoundError elsewhere.)
        _LOGGER.info('/etc/ld.so.preload not found, skipping.')
    ldpreloads = []
    if app.ephemeral_ports.tcp or app.ephemeral_ports.udp:
        treadmill_bind_preload = subproc.resolve('treadmill_bind_preload.so')
        ldpreloads.append(treadmill_bind_preload)
    if not ldpreloads:
        return
    _LOGGER.info('Configuring /etc/ld.so.preload: %r', ldpreloads)
    # Append so any host-provided preloads copied above are kept.
    with open(new_ldpreload, 'a') as f:
        f.write('\n'.join(ldpreloads) + '\n')
def _prepare_hosts(container_dir, app):
    """Create a hosts file for the container.

    overlay/
        /etc/
            hosts           # hosts file to be bind mounted in container.
        /run/
            /host-aliases/  # Directory to be bind mounted in container.
    """
    overlay_dir = os.path.join(container_dir, 'overlay')
    etc_dir = os.path.join(overlay_dir, 'etc')
    ha_dir = os.path.join(overlay_dir, 'run', 'host-aliases')
    for target_dir in (etc_dir, ha_dir):
        fs.mkdir_safe(target_dir)
    # Seed the container's hosts file from the host's.
    shutil.copyfile('/etc/hosts', os.path.join(etc_dir, 'hosts'))
    # The host-aliases directory belongs to the app's proid.
    proid_uid, proid_gid = utils.get_uid_gid(app.proid)
    os.chown(ha_dir, proid_uid, proid_gid)
def _prepare_pam_sshd(tm_env, container_dir, app):
"""Override pam.d sshd stack with special sshd pam stack.
"""
pamd_dir = os.path.join(container_dir, 'overlay', 'etc', 'pam.d')
fs.mkdir_safe(pamd_dir)
new_pam_sshd = os.path.join(pamd_dir, 'sshd')
if app.shared_network:
template_pam_sshd = os.path.join(
tm_env.root, 'etc', 'pam.d', 'sshd.shared_network'
)
else:
template_pam_sshd = os.path.join(
| |
import pygame
import os
import sys
from pygame.locals import *
import characters
import constants as const
import random
from pathlib import Path
# Shorthand alias for pygame's 2D vector type, used for grid positions.
vec = pygame.math.Vector2

# Initialize all imported pygame modules at import time.
pygame.init()
class Game_State:
    """
    The state of the game. Contains all of the sprites and interactable objects
    Attributes:
        view: A (Viewer) allowing the game to display onto the screen
        _running: A (bool) depicting whether the game is running or not
        _ghost_speed: an (int) depicting the number of pixels to move
        the ghosts per frame
        _level: an (int) depicting the current level
        score: an (int) depicting the score the player has earned
        walls: a (list) of all the positions walls exist
        coins: a (list) of all the positions coins exist
        coffees: a (list) of all the positions coffees exist
        olin_man: (OlinMan) a type of Movable player that the user controls
        red_ghost: (Ghost) a type of Movable player that moves on its own
        and can kill the player
        blue_ghost: (Ghost) a type of Movable player that moves on its own
        and can kill the player, moves faster than red_ghost
        ghosts: (pygame.sprite.Group) a group of sprites containing
        all the ghosts
        is_paused: A (bool) depicting whether the game is paused
        clock: a (pygame.Clock) to control the timing of the game
    """

    def __init__(self):
        self.view = Viewer(self)
        self._running = True
        self._ghost_speed = 2
        self._level = 0
        self.score = 0
        # create interactable objects
        self.walls = []
        self.coins = []
        self.coffees = []
        # sprites
        self.olin_man = characters.OlinMan(self, 3)
        self.red_ghost = characters.Ghost(self, vec(14, 14), self._ghost_speed)
        self.blue_ghost = characters.Ghost(
            self,
            vec(13, 14),
            self._ghost_speed + 1,
            vec(-1, 0),
            image="Fright_Ghost.png")
        self.ghosts = pygame.sprite.Group(self.red_ghost, self.blue_ghost)
        # build all the walls, coins, and coffees
        self.setup()
        self.is_paused = False
        self.clock = pygame.time.Clock()

    def setup(self):
        """
        Set up all the walls, coins, coffees and reset the player and ghosts
        """
        # if the game is playing and setup() is called to reset the board
        # start a countdown
        if not self.is_intro():
            # BUG FIX: was `view.countdown()`, which raised NameError
            # (no global `view`); the viewer lives on the instance.
            self.view.countdown()
        # if the player is alive, reset the game board and sprites
        if not self.player_is_dead():
            # NOTE(review): walls/coins/coffees are appended to but never
            # cleared, so repeated resets accumulate duplicates - confirm.
            self.make_walls()
            self.make_coins()
            self.make_coffee()
            self.olin_man = characters.OlinMan(self, self.olin_man.lives)
            self.red_ghost.reset((14, 14))
            self.blue_ghost.reset((13, 14), vec(-1, 0))

    def is_intro(self):
        """
        Return whether the player is in the start screen (True)
        """
        return self._level == 0

    def is_running(self):
        """
        Return whether the game is running (True) or is over (False)
        """
        return self._running

    def end_game(self):
        """
        End the game by setting the running state to (False)
        """
        self._running = False

    def is_gameover(self):
        """
        Return whether the player has enough lives to continue (False)
        or has no lives left (True)
        """
        return self.olin_man.lives < 1

    def player_is_dead(self):
        """
        Return whether the player is dead (True) or alive (False)
        """
        return self.olin_man.dead

    def player_is_vertical(self):
        """
        Return whether the player is moving verticaly (True)
        or horizontaly (False)
        """
        return self.olin_man.direction in [vec(0, 1), vec(0, -1)]

    def get_level(self):
        """
        return the current level (int) of the game
        """
        return self._level

    def pause(self):
        """
        Changes the game state to be paused, or unpauses the game
        """
        # change the pause game state
        self.is_paused = not self.is_paused

    def check_quarter_second(self):
        """
        Check if a quarter second has passed.
        Return (True) during the second half of each half-second window,
        (False) during the first half
        """
        return pygame.time.get_ticks() % 500 > 250

    def _map_positions(self, tile_values):
        """
        Return a (list) of vec board positions whose map tile is in
        tile_values. Rows are offset by 3 to leave room for the scoreboard.
        """
        return [
            vec(col, row + 3)
            for row in range(len(const.MAP))
            for col in range(len(const.MAP[row]))
            if const.MAP[row][col] in tile_values
        ]

    def make_walls(self):
        """
        Adds the position of walls from the map to self.walls
        """
        # tile 1 is a wall, tile 2 is a ghost wall
        self.walls.extend(self._map_positions((1, 2)))

    def make_coins(self):
        """
        Adds the position of coins from the map to self.coins
        """
        self.coins.extend(self._map_positions((4,)))

    def make_coffee(self):
        """
        Adds the position of coffees from the map to self.coffees
        """
        self.coffees.extend(self._map_positions((3,)))

    def check_colide(self):
        """
        Check if OlinMan has ran into a ghost and calls OlinMan's collision()
        Return (True) if there is a colision
        """
        # finds all the ghosts coliding with Olin man
        hit_ghosts = pygame.sprite.spritecollide(
            self.olin_man, self.ghosts, False
        )
        if not hit_ghosts:
            return False
        print("***You hit a Ghost***")
        self.olin_man.collision()
        return True

    def update_highscore(self):
        """
        Check if the highscore was beat and saves the highscore to a file
        Return (True) if the highscore was beat
        """
        if self.get_highscore() < self.score:
            self.save_highscore(self.score)
            print("You beat the highscore!")
            return True
        return False

    @staticmethod
    def save_highscore(score):
        """
        Saves the highscore to a file
        Args:
            score: (int) the highest score achieved
        Return (True) if the file is created/updated
        """
        # Overwrite highscore.txt with the new score.
        # (Removed an unreachable `return False` after the return below.)
        with open("highscore.txt", "w") as file:
            file.write(f"{score}")
        return True

    @staticmethod
    def get_highscore():
        """
        Check if the file exists and return the highscore
        Return a highscore (int), 0 when no highscore file exists
        """
        if not Path("highscore.txt").is_file():
            return 0
        with open("highscore.txt", "r") as file:
            read_data = file.read()
        return int(read_data)
class Controler:
    """
    Controls the game by checking user inputs and modifying the game state
    Attributes:
        state: a (GameState) to access the conditions of the board
    """

    def __init__(self, game):
        """
        Creates a Controler instance
        args:
            game: a (GameState) to access the conditions of the board
        """
        self.state = game

    def start_events(self):
        """
        Check keypresses during the start of the game
        """
        for event in pygame.event.get():
            # if the window was closed, end the game
            if event.type == pygame.QUIT:
                self.state.end_game()
                print("Exiting..")
                pygame.quit()
                print("Done")
                sys.exit()
            # if the user pressed space increase the level and start the game
            if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
                self.state._level += 1
                print("Level:", self.state.get_level())

    def pause_events(self):
        """
        Check keypresses during the pause menu
        """
        for event in pygame.event.get():
            # if the window was closed, end the game
            if event.type == pygame.QUIT:
                self.state.end_game()
                print("Checking highscore")
                # BUG FIX: was `state.update_highscore()`, which raised
                # NameError (no global `state`); use the instance's state.
                self.state.update_highscore()
                print("Exiting..")
                pygame.quit()
                print("Done")
                sys.exit()
            # if the user pressed [space], [p], or [esc] resume the game
            if (
                event.type == pygame.KEYDOWN and
                event.key in (pygame.K_p, pygame.K_ESCAPE, pygame.K_SPACE)
            ):
                self.state.pause()
                print("Resuming game...")

    def events(self):
        """
        Check keypresses during the main game
        """
        for event in pygame.event.get():
            # if the window was closed, end the game
            if event.type == pygame.QUIT:
                # use the public end_game() instead of poking _running
                self.state.end_game()
            # if the user pressed [space], [p], or [esc] pause the game
            elif (
                event.type == pygame.KEYDOWN and
                event.key in (pygame.K_p, pygame.K_ESCAPE, pygame.K_SPACE)
            ):
                self.state.pause()
                print("Paused")
            # if the user pressed up, down, left, or right
            # change OlinMan's direction
            elif event.type == KEYDOWN or event.type == KEYUP:
                key_to_direction = {
                    K_w: "up", K_UP: "up",
                    K_s: "down", K_DOWN: "down",
                    K_a: "left", K_LEFT: "left",
                    K_d: "right", K_RIGHT: "right",
                }
                direction = key_to_direction.get(event.key)
                if direction is not None:
                    self.state.olin_man.move(direction)
        self.state.olin_man.update()
class Viewer:
"""
Creates the screen and displays the game to the screen.
Attributes:
state: a (GameState) for checking conditions of the board
_text_size: An (int) depicting the size of text in pixels
_font: a (pygame.font.Font) object controling the games font
window_screen: a (pygame.display) window to display everything on
background: a (pygame.image) of the original PacMan map
wall: a (pygame.image) of the wall blocks
coin: a (pygame.image) of a coin
post_it: a (pygame.image) of stack of post-it notes
coffee: a (pygame.image) of an Acronym Coffee
heart: a (pygame.image) of the heart of OlinMan
"""
def __init__(self, game):
"""
Creates a Viewer instance
args:
state: a (GameState) to access the conditions of the board
"""
self.state = game
# set up the font
self._text_size = 16
self._font = pygame.font.Font(
"PressStart2P-Regular.ttf",
self._text_size
)
# setup the basic window
self.window_screen = pygame.display.set_mode(
(const.WINDOW_WIDTH, const.WINDOW_HEIGHT)
)
self.window_screen.fill(const.BLACK)
# load all the images from local files
self.background = self.object_image("Maze.jpeg", x=28, y=31)
self.wall = self.object_image("Wall.png")
self.coin = self.object_image("Coin.png")
self.post_it = | |
(Element,), {'name': 'joints'})
# One module-level Element subclass per COLLADA element tag.  Each entry in
# _ELEMENT_NAMES becomes a class whose class name and `name` attribute both
# equal the tag; duplicate entries re-bind an identical definition and are
# harmless.
# NOTE(review): names like 'emantic', 'ommon', 'verview' and 'nterpolation'
# look like generator artifacts (leading letter stripped) - confirm against
# whatever produced this table before relying on them.
_ELEMENT_NAMES = (
    'keywords', 'kinematics_model', 'kinematics_scene', 'kinematics',
    'lambert', 'latitude', 'layer', 'library_animation_clips',
    'library_animations', 'library_articulated_systems', 'library_cameras',
    'library_controllers', 'library_effects', 'library_force_fields',
    'library_formulas', 'library_geometries', 'library_images',
    'library_joints', 'library_kinematics_models',
    'library_kinematics_scenes', 'library_nodes',
    'library_physics_materials', 'library_physics_models',
    'library_physics_scenes', 'library_visual_scenes', 'light_ambient',
    'light_constant_attenuation', 'light_diffuse', 'light_enable',
    'light_linear_attenuation', 'light_model_ambient',
    'light_model_color_control', 'light_model_local_viewer_enable',
    'light_model_two_side_enable', 'light_position',
    'light_quadratic_attenuation', 'light_specular', 'light_spot_cutoff',
    'light_spot_direction', 'light_spot_exponent', 'lighting_enable',
    'lights', 'limits', 'axis_info', 'prismatic', 'revolute',
    'line_smooth_enable', 'line_stipple_enable', 'line_stipple',
    'line_width', 'line', 'linear_attenuation', 'spot', 'linear', 'lines',
    'linestrips', 'link', 'linker', 'locked', 'logic_op_enable',
    'logic_op', 'longitude', 'lookout', 'magfilter', 'mass_frame',
    'rigid_body', 'mass', 'shape', 'material_ambient', 'material_diffuse',
    'material_emission', 'material_shininess', 'material_specular',
    'material', 'matrix', 'max_anisotropy', 'mesh', 'minfilter',
    'mip_bias', 'mip_max_level', 'mip_min_level', 'mipfilter', 'mips',
    'create_cube', 'create2d', 'create3d', 'model_view_matrix',
    'modified', 'modifier', 'morph', 'motion', 'multisample_enable',
    'Name_array', 'emantic', 'newparam', 'ommon', 'node',
    'normalize_enable', 'nurbs_surface', 'nurbs', 'optics', 'orient',
    'origin', 'orthographic', 'p', 'edges', 'faces', 'lines', 'pcurves',
    'ph', 'solids', 'triangles', 'trifans', 'tristrips', 'wires',
    'parabola', 'param', 'ph', 'phong', 'physics_material',
    'physics_model', 'physics_scene', 'plane',
    'point_distance_attenuation', 'point_fade_threshold_size',
    'point_size_max', 'point_size_min', 'point_size',
    'point_smooth_enable', 'polygon_mode', 'polygon_offset_fill_enable',
    'polygon_offset_line_enable', 'polygon_offset_point_enable',
    'polygon_offset', 'polygon_smooth_enable', 'polygon_stipple_enable',
    'polygons', 'polylist', 'prismatic', 'profile_BRIDGE', 'profile_CG',
    'profile_COMMON', 'verview', 'profile_GLES', 'profile_GLES2',
    'profile_GLSL', 'program', 'projection_matrix',
    'quadratic_attenuation', 'point', 'radius', 'capsule', 'circle',
    'cone', 'cylinder', 'ellipse', 'hyperbola', 'torus', 'ref_attachment',
    'ref', 'binary', 'init_from', 'reflective', 'reflectivity', 'render',
    'renderable', 'rescale_normal_enable', 'restitution', 'revision',
    'revolute', 'RGB', 'rigid_constraint', 'rotate',
    'sample_alpha_to_coverage_enable', 'sample_alpha_to_one_enable',
    'sample_coverage_enable', 'sample_coverage', 'sampler_image',
    'sampler_states', 'sampler', 'nterpolation', 'sampler1D', 'sampler2D',
    'sampler3D', 'samplerCUBE', 'samplerDEPTH', 'samplerRECT', 'scale',
    'scene', 'scissor_test_enable', 'scissor', 'semantic', 'setparam',
    'shade_model', 'shader', 'shape', 'shells', 'shininess',
    'SIDREF_array', 'size_exact', 'size_ratio', 'size', 'create_cube',
    'create3d', 'skeleton', 'skew', 'skin', 'source_data', 'source',
    'sources', 'specular', 'speed', 'sphere', 'spline', 'nterpolation',
    'spot', 'spring', 'states', 'static_friction', 'stencil_clear',
    'stencil_func_separate', 'stencil_func', 'stencil_mask_separate',
    'stencil_mask', 'stencil_op_separate', 'stencil_op', 'stencil_target',
    'stencil_test_enable', 'stiffness', 'subject', 'surface_curves',
    'surface', 'surfaces', 'swept_surface', 'swing_cone_and_twist',
    'target_value', 'target', 'targets', 'bind_material', 'formula',
    'instance_rigid_body', 'kinematics_model', 'kinematics', 'light',
    'motion', 'optics', 'rigid_body', 'verview', 'technique_hint',
    'technique_override', 'texcombiner', 'texcoord', 'texenv',
    'texture_env_color', 'texture_env_mode',
)
for _element_name in _ELEMENT_NAMES:
    globals()[_element_name] = type(
        _element_name, (Element,), {'name': _element_name}
    )
del _element_name
del _ELEMENT_NAMES
texture_pipeline = type('texture_pipeline', | |
from snovault import (
calculated_property,
collection,
load_schema,
)
from .base import (
Item,
paths_filtered_by_status,
)
from .shared_calculated_properties import (
CalculatedBiosampleSlims,
CalculatedBiosampleSynonyms
)
import re
@collection(
    name='biosamples',
    unique_key='accession',
    properties={
        'title': 'Biosamples',
        'description': 'Biosamples used in the Lung Map project',
    })
class Biosample(Item, CalculatedBiosampleSlims, CalculatedBiosampleSynonyms):
    """A biosample item, keyed by accession.

    The calculated properties below resolve donor-derived fields (sex, age,
    age_units, health_status, life_stage) for human samples by embedding the
    linked donor object, and fall back to the model-organism-specific raw
    properties otherwise.
    """
    item_type = 'biosample'
    schema = load_schema('encoded:schemas/biosample.json')
    name_key = 'accession'
    # Reverse links: items of the given type whose named property points back
    # at this biosample.
    rev = {
        'characterizations': ('BiosampleCharacterization', 'characterizes'),
        'parent_of': ('Biosample', 'part_of'),
    }
    # Linked-object paths embedded into the biosample view (for indexing/UI).
    embedded = [
        'donor',
        'donor.mutated_gene',
        'donor.organism',
        'donor.characterizations',
        'donor.characterizations.award',
        'donor.characterizations.lab',
        'donor.characterizations.submitted_by',
        'donor.documents',
        'donor.documents.award',
        'donor.documents.lab',
        'donor.documents.submitted_by',
        'donor.references',
        'model_organism_donor_constructs',
        'model_organism_donor_constructs.submitted_by',
        'model_organism_donor_constructs.promoter_used',
        'model_organism_donor_constructs.target',
        'model_organism_donor_constructs.documents',
        'model_organism_donor_constructs.documents.award',
        'model_organism_donor_constructs.documents.lab',
        'model_organism_donor_constructs.documents.submitted_by',
        'submitted_by',
        'lab',
        'award',
        'award.pi.lab',
        'source',
        'treatments',
        'treatments.documents.submitted_by',
        'treatments.documents.lab',
        'treatments.documents.award',
        'constructs',
        'constructs.documents.submitted_by',
        'constructs.documents.award',
        'constructs.documents.lab',
        'constructs.target',
        'documents.lab',
        'documents.award',
        'documents.submitted_by',
        'derived_from',
        'part_of',
        'part_of.documents',
        'part_of.documents.award',
        'part_of.documents.lab',
        'part_of.documents.submitted_by',
        'part_of.characterizations.documents',
        'part_of.characterizations.documents.award',
        'part_of.characterizations.documents.lab',
        'part_of.characterizations.documents.submitted_by',
        'part_of.constructs.documents',
        'part_of.constructs.documents.award',
        'part_of.constructs.documents.lab',
        'part_of.constructs.documents.submitted_by',
        'part_of.rnais.documents.award',
        'part_of.rnais.documents.lab',
        'part_of.rnais.documents.submitted_by',
        'part_of.treatments.documents',
        'part_of.talens.documents',
        'parent_of',
        'pooled_from',
        'characterizations.submitted_by',
        'characterizations.award',
        'characterizations.lab',
        'rnais',
        'rnais.target',
        'rnais.target.organism',
        'rnais.source',
        'rnais.documents.submitted_by',
        'rnais.documents.award',
        'rnais.documents.lab',
        'organism',
        'references',
        'talens',
        'talens.documents',
        'talens.documents.award',
        'talens.documents.lab',
        'talens.documents.submitted_by',
        'genetic_modifications',
        'genetic_modifications.award',
        'genetic_modifications.lab',
        'genetic_modifications.modification_techniques',
        'genetic_modifications.treatments',
        'genetic_modifications.target'
    ]
    # NOTE(review): largely mirrors `embedded` (minus part_of/parent_of) —
    # presumably controls audit propagation from linked items; confirm
    # against the snovault audit machinery.
    audit_inherit = [
        'donor',
        'donor.mutated_gene',
        'donor.organism',
        'donor.characterizations',
        'donor.characterizations.award',
        'donor.characterizations.lab',
        'donor.characterizations.submitted_by',
        'donor.donor_documents',
        'donor.donor_documents.award',
        'donor.donor_documents.lab',
        'donor.donor_documents.submitted_by',
        'donor.references',
        'model_organism_donor_constructs',
        'model_organism_donor_constructs.submitted_by',
        'model_organism_donor_constructs.target',
        'model_organism_donor_constructs.documents',
        'model_organism_donor_constructs.documents.award',
        'model_organism_donor_constructs.documents.lab',
        'model_organism_donor_constructs.documents.submitted_by',
        'submitted_by',
        'lab',
        'award',
        'award.pi.lab',
        'source',
        'treatments',
        'treatments.documents.submitted_by',
        'treatments.documents.lab',
        'treatments.documents.award',
        'constructs',
        'constructs.documents.submitted_by',
        'constructs.documents.award',
        'constructs.documents.lab',
        'constructs.target',
        'documents.lab',
        'documents.award',
        'documents.submitted_by',
        'derived_from',
        'pooled_from',
        'characterizations.submitted_by',
        'characterizations.award',
        'characterizations.lab',
        'rnais',
        'rnais.target',
        'rnais.target.organism',
        'rnais.source',
        'rnais.documents.submitted_by',
        'rnais.documents.award',
        'rnais.documents.lab',
        'organism',
        'references',
        'talens',
        'talens.documents',
        'talens.documents.award',
        'talens.documents.lab',
        'talens.documents.submitted_by',
        'genetic_modifications',
        'genetic_modifications.award',
        'genetic_modifications.lab',
        'genetic_modifications.modification_techniques',
        'genetic_modifications.treatments',
        'genetic_modifications.target'
    ]
    @calculated_property(define=True,
                         schema={"title": "Sex",
                                 "type": "string"})
    def sex(self, request, donor=None, model_organism_sex=None, organism=None):
        """Sex of the sample.

        For human samples (organism is Homo sapiens) the donor's ``sex`` is
        used; otherwise ``model_organism_sex``.  Returns 'unknown' whenever
        the information is unavailable.
        """
        humanFlag = False
        if organism is not None:
            organismObject = request.embed(organism, '@@object')
            if organismObject['scientific_name'] == 'Homo sapiens':
                humanFlag = True
        if humanFlag is True:
            if donor is not None: # try to get the sex from the donor
                donorObject = request.embed(donor, '@@object')
                if 'sex' in donorObject:
                    return donorObject['sex']
                else:
                    return 'unknown'
            else:
                return 'unknown'
        else:
            if model_organism_sex is not None:
                return model_organism_sex
            else:
                return 'unknown'
    @calculated_property(define=True,
                         schema={"title": "Age",
                                 "type": "string"})
    def age(self, request, donor=None, model_organism_age=None, organism=None):
        """Age of the sample: donor's ``age`` for human samples, otherwise
        ``model_organism_age``; 'unknown' when unavailable."""
        humanFlag = False
        if organism is not None:
            organismObject = request.embed(organism, '@@object')
            if organismObject['scientific_name'] == 'Homo sapiens':
                humanFlag = True
        if humanFlag is True:
            if donor is not None: # try to get the age from the donor
                donorObject = request.embed(donor, '@@object')
                if 'age' in donorObject:
                    return donorObject['age']
                else:
                    return 'unknown'
            else:
                return 'unknown'
        else:
            if model_organism_age is not None:
                return model_organism_age
            else:
                return 'unknown'
    @calculated_property(define=True,
                         schema={"title": "Age units",
                                 "type": "string"})
    def age_units(self, request, donor=None, model_organism_age_units=None, organism=None):
        """Age units: donor's ``age_units`` for human samples, otherwise
        ``model_organism_age_units``.

        NOTE(review): unlike sex/age/life_stage this returns None (not
        'unknown') when unavailable — callers should expect the property to
        be absent rather than 'unknown'.
        """
        humanFlag = False
        if organism is not None:
            organismObject = request.embed(organism, '@@object')
            if organismObject['scientific_name'] == 'Homo sapiens':
                humanFlag = True
        if humanFlag is True:
            if donor is not None: # try to get the age_units from the donor
                donorObject = request.embed(donor, '@@object')
                if 'age_units' in donorObject:
                    return donorObject['age_units']
                else:
                    return None
            else:
                return None
        else:
            return model_organism_age_units
    @calculated_property(define=True,
                         schema={"title": "Health status",
                                 "type": "string"})
    def health_status(self, request, donor=None, model_organism_health_status=None, organism=None):
        """Health status: donor's ``health_status`` for human samples with a
        donor, ``model_organism_health_status`` for non-human samples,
        None otherwise."""
        humanFlag = False
        if organism is not None:
            organismObject = request.embed(organism, '@@object')
            if organismObject['scientific_name'] == 'Homo sapiens':
                humanFlag = True
        if humanFlag is True and donor is not None:
            donorObject = request.embed(donor, '@@object')
            if 'health_status' in donorObject:
                return donorObject['health_status']
            else:
                return None
        else:
            if humanFlag is False:
                return model_organism_health_status
            return None
    @calculated_property(define=True,
                         schema={"title": "Life stage",
                                 "type": "string"})
    def life_stage(self, request, donor=None, mouse_life_stage=None, fly_life_stage=None,
                   worm_life_stage=None, organism=None):
        """Life stage: donor's ``life_stage`` for human samples; otherwise the
        first non-None of mouse/fly/worm life stage; 'unknown' as fallback."""
        humanFlag = False
        if organism is not None:
            organismObject = request.embed(organism, '@@object')
            if organismObject['scientific_name'] == 'Homo sapiens':
                humanFlag = True
        if humanFlag is True and donor is not None:
            donorObject = request.embed(donor, '@@object')
            if 'life_stage' in donorObject:
                return donorObject['life_stage']
            else:
                return 'unknown'
        else:
            if humanFlag is False:
                # Priority order: mouse, then fly, then worm.
                if mouse_life_stage is not None:
                    return mouse_life_stage
                if fly_life_stage is not None:
                    return fly_life_stage
                if worm_life_stage is not None:
                    return worm_life_stage
            return 'unknown'
    @calculated_property(define=True,
                         schema={"title": "Synchronization",
                                 "type": "string"})
    def synchronization(self, request, donor=None, mouse_synchronization_stage=None,
                        fly_synchronization_stage=None, worm_synchronization_stage=None):
        """First non-None synchronization stage (mouse, fly, worm), falling
        back to the donor's ``synchronization``; implicitly None otherwise."""
        # XXX mouse_synchronization_stage does not exist
        if mouse_synchronization_stage is not None:
            return mouse_synchronization_stage
        if fly_synchronization_stage is not None:
            return fly_synchronization_stage
        if worm_synchronization_stage is not None:
            return worm_synchronization_stage
        if donor is not None:
            return request.embed(donor, '@@object').get('synchronization')
    @calculated_property(schema={
        "title": "DNA constructs",
        "description":
            "Expression or targeting vectors stably or transiently transfected "
            "(not RNAi) into a donor organism.",
        "type": "array",
        "items": {
            "title": "DNA Constructs",
            "description": "An expression or targeting vector stably or transiently transfected "
            "(not RNAi) into a donor organism.",
            "comment": "See contstruct.json for available identifiers.",
            "type": "string",
            "linkTo": "Construct",
        },
    }, define=True)
    def model_organism_donor_constructs(self, request, donor=None):
        """Constructs transfected into the donor organism (the donor's
        ``constructs`` list), if any."""
        if donor is not None:
            return request.embed(donor, '@@object').get('constructs')
    @calculated_property(schema={
        "title": "Characterizations",
        "type": "array",
        "items": {
            "type": ['string', 'object'],
            "linkFrom": "BiosampleCharacterization.characterizes",
        },
    })
    def characterizations(self, request, characterizations):
        """Reverse-linked characterizations, filtered by status."""
        return paths_filtered_by_status(request, characterizations)
    @calculated_property(schema={
        "title": "Child biosamples",
        "type": "array",
        "items": {
            "type": ['string', 'object'],
            "linkFrom": "Biosample.part_of",
        },
    })
    def parent_of(self, request, parent_of):
        """Biosamples that declare this one as their ``part_of`` parent,
        filtered by status."""
        return paths_filtered_by_status(request, parent_of)
    @calculated_property(schema={
        "title": "Age",
        "type": "string",
    })
    def age_display(self, request, donor=None, model_organism_age=None,
                    model_organism_age_units=None, post_synchronization_time=None,
                    post_synchronization_time_units=None):
        """Human-readable age string.

        Priority: post-synchronization time, then donor age, then model
        organism age.  Returns '' for a donor with age 'unknown' and None
        when nothing applies.
        """
        if post_synchronization_time is not None and post_synchronization_time_units is not None:
            return u'{sync_time} {sync_time_units}'.format(
                sync_time=post_synchronization_time,
                sync_time_units=post_synchronization_time_units)
        if donor is not None:
            donor = request.embed(donor, '@@object')
            if 'age' in donor and 'age_units' in donor:
                if donor['age'] == 'unknown':
                    return ''
                return u'{age} {age_units}'.format(**donor)
        if model_organism_age is not None and model_organism_age_units is not None:
            return u'{age} {age_units}'.format(
                age=model_organism_age,
                age_units=model_organism_age_units,
            )
        return None
    @calculated_property(condition='depleted_in_term_name', schema={
        "title": "depleted_in_term_id",
        "type": "string",
    })
    def depleted_in_term_id(self, request, depleted_in_term_name):
        """Map each depleted-in term name to its UBERON/FBbt ontology ID.

        NOTE(review): returns a list (one entry per term name, 'Term ID
        unknown' for unmapped names) even though the schema declares
        type "string" — confirm intent.
        """
        term_lookup = {
            'head': 'UBERON:0000033',
            'limb': 'UBERON:0002101',
            'salivary gland': 'UBERON:0001044',
            'male accessory sex gland': 'UBERON:0010147',
            'testis': 'UBERON:0000473',
            'female gonad': 'UBERON:0000992',
            'digestive system': 'UBERON:0001007',
            'arthropod fat body': 'UBERON:0003917',
            'antenna': 'UBERON:0000972',
            'adult maxillary segment': 'FBbt:00003016',
            'female reproductive system': 'UBERON:0000474',
            'male reproductive system': 'UBERON:0000079'
        }
        term_id = list()
        for term_name in depleted_in_term_name:
            if term_name in term_lookup:
                term_id.append(term_lookup.get(term_name))
            else:
                term_id.append('Term ID unknown')
        return term_id
    @calculated_property(condition='subcellular_fraction_term_name', schema={
        "title": "subcellular_fraction_term_id",
        "type": "string",
    })
    def subcellular_fraction_term_id(self, request, subcellular_fraction_term_name):
        """Map the subcellular fraction term name to its GO/NTR ontology ID;
        'Term ID unknown' for unmapped names."""
        term_lookup = {
            'nucleus': 'GO:0005634',
            'cytosol': 'GO:0005829',
            'chromatin': 'GO:0000785',
            'membrane': 'GO:0016020',
            'mitochondria': 'GO:0005739',
            'nuclear matrix': 'GO:0016363',
            'nucleolus': 'GO:0005730',
            'nucleoplasm': 'GO:0005654',
            'polysome': 'GO:0005844',
            'insoluble cytoplasmic fraction': 'NTR:0002594'
        }
        if subcellular_fraction_term_name in term_lookup:
            return term_lookup.get(subcellular_fraction_term_name)
        else:
            return 'Term ID unknown'
    @calculated_property(schema={
        "title": "Summary",
        "type": "string",
    })
    def summary(self, request,
                organism=None,
                donor=None,
                age=None,
                age_units=None,
                life_stage=None,
                sex=None,
                biosample_term_name=None,
                biosample_type=None,
                starting_amount=None,
                starting_amount_units=None,
                depleted_in_term_name=None,
                phase=None,
                synchronization=None,
                subcellular_fraction_term_name=None,
                post_synchronization_time=None,
                post_synchronization_time_units=None,
                post_treatment_time=None,
                post_treatment_time_units=None,
                treatments=None,
                part_of=None,
                derived_from=None,
                transfection_method=None,
                transfection_type=None,
                genetic_modifications=None,
                constructs=None,
                model_organism_donor_constructs=None,
                rnais=None):
        """Compose a one-line human-readable summary of the biosample.

        Embeds every linked object referenced by the raw properties, packs
        them into a dictionary via ``generate_summary_dictionary`` and
        renders the sentence with ``construct_biosample_summary`` using the
        fixed phrase order below.
        """
        # Order in which phrases are assembled into the summary sentence.
        sentence_parts = [
            'organism_name',
            'genotype_strain',
            'term_phrase',
            'phase',
            'fractionated',
            'sex_stage_age',
            'synchronization',
            'modifications_list',
            'derived_from',
            'transfection_type',
            'rnais',
            'treatments_phrase',
            'depleted_in',
            'constructs',
            'model_organism_constructs'
        ]
        organismObject = None
        donorObject = None
        if organism is not None:
            organismObject = request.embed(organism, '@@object')
        if donor is not None:
            donorObject = request.embed(donor, '@@object')
        treatment_objects_list = None
        if treatments is not None and len(treatments) > 0:
            treatment_objects_list = []
            for t in treatments:
                treatment_objects_list.append(request.embed(t, '@@object'))
        part_of_object = None
        if part_of is not None:
            part_of_object = request.embed(part_of, '@@object')
        derived_from_object = None
        if derived_from is not None:
            derived_from_object = request.embed(derived_from, '@@object')
        # Genetic modifications are flattened to (type, technique-object)
        # pairs, one per modification technique.
        modifications_list = None
        if genetic_modifications is not None and len(genetic_modifications) > 0:
            modifications_list = []
            for gm in genetic_modifications:
                gm_object = request.embed(gm, '@@object')
                if 'modification_techniques' in gm_object and \
                   len(gm_object['modification_techniques']) > 0:
                    for gmt in gm_object['modification_techniques']:
                        modifications_list.append((gm_object['modification_type'],
                                                   request.embed(gmt, '@@object')))
        # Constructs become (construct, target, promoter-or-None) triples.
        construct_objects_list = None
        if constructs is not None and len(constructs) > 0:
            construct_objects_list = []
            for c in constructs:
                construct_object = request.embed(c, '@@object')
                target_name = construct_object['target']
                if 'promoter_used' in construct_object and \
                   construct_object['promoter_used'] is not None:
                    promo = construct_object['promoter_used']
                    item_to_add = (construct_object,
                                   request.embed(target_name, '@@object'),
                                   request.embed(promo, '@@object'))
                else:
                    item_to_add = (construct_object,
                                   request.embed(target_name, '@@object'),
                                   None)
                construct_objects_list.append(item_to_add)
        # Same triple structure for the donor's constructs.
        model_construct_objects_list = None
        if model_organism_donor_constructs is not None and len(model_organism_donor_constructs) > 0:
            model_construct_objects_list = []
            for c in model_organism_donor_constructs:
                construct_object = request.embed(c, '@@object')
                target_name = construct_object['target']
                if 'promoter_used' in construct_object and \
                   construct_object['promoter_used'] is not None:
                    promo = construct_object['promoter_used']
                    item_to_add = (construct_object,
                                   request.embed(target_name, '@@object'),
                                   request.embed(promo, '@@object'))
                else:
                    item_to_add = (construct_object,
                                   request.embed(target_name, '@@object'),
                                   None)
                model_construct_objects_list.append(item_to_add)
        rnai_objects = None
        if rnais is not None and len(rnais) > 0:
            rnai_objects = []
            for r in rnais:
                rnai_object = request.embed(r, '@@object')
                target_object = request.embed(rnai_object['target'], '@@object')
                rnai_info = {'rnai_type': rnai_object['rnai_type'],
                             'target': target_object['label']}
                rnai_objects.append(rnai_info)
        biosample_dictionary = generate_summary_dictionary(
            organismObject,
            donorObject,
            age,
            age_units,
            life_stage,
            sex,
            biosample_term_name,
            biosample_type,
            starting_amount,
            starting_amount_units,
            depleted_in_term_name,
            phase,
            subcellular_fraction_term_name,
            synchronization,
            post_synchronization_time,
            post_synchronization_time_units,
            post_treatment_time,
            post_treatment_time_units,
            transfection_type,
            treatment_objects_list,
            part_of_object,
            derived_from_object,
            modifications_list,
            construct_objects_list,
            model_construct_objects_list,
            rnai_objects)
        return construct_biosample_summary([biosample_dictionary],
                                           sentence_parts)
def generate_summary_dictionary(
organismObject=None,
donorObject=None,
age=None,
age_units=None,
life_stage=None,
sex=None,
biosample_term_name=None,
biosample_type=None,
starting_amount=None,
starting_amount_units=None,
depleted_in_term_name=None,
phase=None,
subcellular_fraction_term_name=None,
synchronization=None,
post_synchronization_time=None,
post_synchronization_time_units=None,
post_treatment_time=None,
post_treatment_time_units=None,
| |
# filename: mycluster/DeltaTrimax.py
# -*- coding: utf-8 -*-
import numpy as np
import warnings
class EmptyTriclusterException(Exception):
    """Raised when a tricluster has no remaining entries along some axis
    (chromosomes, genes or samples), so its MSR cannot be computed."""
    pass
class DeltaTrimax():
"""
The delta-TRIMAX clustering algorithm.
Attributes
----------
D : ndarray
The data to be clustered
delta : float
The delta parameter of the algorithm. Must be > 0.0
l : float
The lambda parameter of the algorithm. Must be >= 1.0
chrom_cutoff : int
The deletion threshold for the chromosome axis
gene_cutoff : int
The deletion threshold for the gene axis
sample_cutoff : int
The deletion threshold for the sample axis
tol : float
The algorithm's tolerance
mask_mode : {'random', 'nan'}
The masking method for the clustered values. If 'random', the values
are replaced by random floats. If 'nan', they are replaced by nan
values.
n_chroms : int
The number of chromosome pairs
n_genes : int
The number of genes
n_samples : int
The number of samples
result_chroms : list of ndarray
A list of length #triclusters, containg a boolean ndarray for each
tricluster. The boolean array is of length #chromosomes and contains
True if the respective chromosome is contained in the tricluster,
False otherwise.
result_genes : list of ndarray
A list of length #triclusters, containg a boolean ndarray for each
tricluster. The boolean array is of length #genes and contains
True if the respective gene is contained in the tricluster,
False otherwise.
result_samples : list of ndarray
A list of length #triclusters, containg a boolean ndarray for each
tricluster. The boolean array is of length #samples and contains
True if the respective sample is contained in the tricluster,
False otherwise.
MSR : float
The Mean Squared Residue of each cell.
MSR_chrom : float
The Mean Squared Residue of each chromosome.
MSR_gene : float
The Mean Squared Residue of each gene.
MSR_sample : float
The Mean Squared Residue of each sample.
Methods
-------
fit(self, delta=2.5, l=1.005, chrom_cutoff=50, gene_cutoff=50,
sample_cutoff=50, tol=1e-5, mask_mode='nan', verbose=False)
Run the delta-TRIMAX algorithm for the given parameters.
get_triclusters()
Return the triclusters found by the algorithm.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
and <NAME>, ‘Coexpression and coregulation analysis of
time-series gene expression data in estrogen-induced breast cancer
cell’, Algorithms Mol. Biol., τ. 8, τχ. 1, σ 9, 2013.
"""
def __init__(self, D):
"""
Parameters
----------
D : ndarray
The data to be clustered
"""
self.D = D.copy()
def _check_parameters(self):
"""
Checks the parameters given by the user. If the values are not valid,
a ValueError is raised.
"""
if (self.delta < 0):
raise ValueError("'delta' must be > 0.0, but its value"
" is {}".format(self.delta))
if (self.l < 1):
raise ValueError("'lambda' must be >= 1.0, but its"
" value is {}".format(self.l))
if (self.gene_cutoff < 1):
raise ValueError("'gene deletion cutoff' must be > 1.0, but its"
" value is {}".format(self.gene_cutoff))
if (self.sample_cutoff < 1):
raise ValueError("'sample deletion cutoff' must be > 1.0, but its"
" value is {}".format(self.sample_cutoff))
if (self.chrom_cutoff < 1):
raise ValueError("'chromosomes deletion cutoff' must be > 1.0, but"
" its value is {}".format(self.chrom_cutoff))
if (self.mask_mode not in ['nan', 'random']):
raise ValueError("'mask mode' must be either 'nan' or 'random',"
" but its value is {}".format(self.mask_mode))
    def _compute_MSR(self, chroms, genes, samples):
        """
        Computes the Mean Squared Residue (MSR) for the algorithm.
        Parameters
        ----------
        chroms : ndarray
            Contains 1 for a chromosome pair that belongs to the tricluster
            currently examined, 0 otherwise.
        genes : ndarray
            Contains 1 for a gene that belongs to the tricluster currently
            examined, 0 otherwise.
        samples : ndarray
            Contains 1 for a sample that belongs to the tricluster currently
            examined, 0 otherwise.
        Raises
        ------
        EmptyTriclusterException
            If the tricluster has no members along any one of the three axes.
        Note
        ----
        Updates the n_chorms, n_genes, n_samples, MSR, MSR_chrom, MSR_gene and
        MSR_sample attributes.
        """
        # Index arrays shaped (n,1,1), (1,m,1) and (1,1,k) so the fancy
        # indexing below broadcasts into the full (n, m, k) sub-cube.
        chrom_idx = np.expand_dims(np.expand_dims(np.nonzero(chroms)[0], axis=1), axis=1)
        gene_idx = np.expand_dims(np.expand_dims(np.nonzero(genes)[0], axis=0), axis=2)
        sample_idx = np.expand_dims(np.expand_dims(np.nonzero(samples)[0], axis=0), axis=0)
        if (not chrom_idx.size) or (not gene_idx.size) or (not sample_idx.size):
            raise EmptyTriclusterException()
        subarr = self.D[chrom_idx, gene_idx, sample_idx]
        self.n_chroms = subarr.shape[0]
        self.n_genes = subarr.shape[1]
        self.n_samples = subarr.shape[2]
        with warnings.catch_warnings():  # We expect mean of NaNs here
            warnings.simplefilter("ignore", category=RuntimeWarning)
            # Computation of m_iJK: per-chromosome mean over genes x samples
            m_iJK = np.nanmean(np.nanmean(subarr, axis=2), axis=1)
            m_iJK = np.expand_dims(np.expand_dims(m_iJK, axis=1), axis=1)
            # Computation of m_IjK: per-gene mean over chroms x samples
            m_IjK = np.nanmean(np.nanmean(subarr, axis=2), axis=0)
            m_IjK = np.expand_dims(np.expand_dims(m_IjK, axis=0), axis=2)
            # Computation of m_IJk: per-sample mean over chroms x genes.
            # NOTE(review): the divisor counts NaNs only in the first sample
            # slice (subarr[:,:,0]) — this is correct only if the NaN mask is
            # identical across samples; confirm for mask_mode='nan'.
            m_IJk = np.nansum(np.nansum(subarr, axis=0, keepdims=1), axis=1, keepdims=1)
            m_IJk = m_IJk / ((subarr.shape[0] * subarr.shape[1]) - np.count_nonzero(np.isnan(subarr[:,:,0])))
            # Computation of m_IJK: grand mean of the tricluster
            m_IJK = np.nanmean(subarr)
            # Computation of MSR from the additive-model residue:
            # r_ijk = d_ijk - m_iJK - m_IjK - m_IJk + 2*m_IJK
            residue = subarr - m_iJK - m_IjK - m_IJk + (2*m_IJK)
            SR = np.square(residue)
            self.MSR = np.nanmean(SR)
            self.MSR_chrom = np.nanmean(np.nanmean(SR, axis=2), axis=1)
            self.MSR_gene = np.nanmean(np.nanmean(SR, axis=2), axis=0)
            self.MSR_sample = np.nanmean(np.nanmean(SR, axis=0), axis=0)
        # Check tolerance: values below tol are snapped to zero so the
        # deletion loops terminate cleanly on numerically-flat triclusters.
        self.MSR_chrom[self.MSR_chrom < self.tol] = 0
        self.MSR_gene[self.MSR_gene < self.tol] = 0
        self.MSR_sample[self.MSR_sample < self.tol] = 0
        self.MSR = 0 if (self.MSR < self.tol or np.isnan(self.MSR)) else self.MSR
    def _single_node_deletion(self, chroms, genes, samples):
        """
        The single node deletion routine of the algorithm.

        Repeatedly removes the single chromosome, gene or sample with the
        largest axis MSR until the overall MSR drops to delta (the arrays
        are modified in place).
        Parameters
        ----------
        chroms : ndarray
            Contains 1 for a chromosome pair that belongs to the tricluster
            currently examined, 0 otherwise.
        genes : ndarray
            Contains 1 for a gene that belongs to the tricluster currently
            examined, 0 otherwise.
        samples : ndarray
            Contains 1 for a sample that belongs to the tricluster currently
            examined, 0 otherwise.
        Returns
        -------
        chroms : ndarray
            Contains 1 for a chromosome pair that belongs to the tricluster
            examined, 0 otherwise.
        genes : ndarray
            Contains 1 for a gene that belongs to the tricluster examined,
            0 otherwise.
        samples : ndarray
            Contains 1 for a sample that belongs to the tricluster examined,
            0 otherwise.
        """
        self._compute_MSR(chroms, genes, samples)
        while (self.MSR > self.delta):
            # Position (among surviving entries) of the worst offender on
            # each axis; MSR_* are indexed over nonzero entries only.
            chrom_idx = np.nanargmax(self.MSR_chrom)
            gene_idx = np.nanargmax(self.MSR_gene)
            sample_idx = np.nanargmax(self.MSR_sample)
            with warnings.catch_warnings():  # We expect mean of NaNs here
                warnings.simplefilter("ignore", category=RuntimeWarning)
                # Strict '>' comparisons: on ties genes beat chroms and
                # samples beat both.
                if (self.MSR_chrom[chrom_idx] > self.MSR_gene[gene_idx]):
                    if (self.MSR_chrom[chrom_idx] > self.MSR_sample[sample_idx]):
                        # Delete chrom: map the position among nonzero
                        # entries back to the absolute index, then zero it.
                        nonz_idx = chroms.nonzero()[0]
                        chroms.put(nonz_idx[chrom_idx], 0)
                    else:
                        # Delete sample
                        nonz_idx = samples.nonzero()[0]
                        samples.put(nonz_idx[sample_idx], 0)
                else:
                    if (self.MSR_gene[gene_idx] > self.MSR_sample[sample_idx]):
                        # Delete gene
                        nonz_idx = genes.nonzero()[0]
                        genes.put(nonz_idx[gene_idx], 0)
                    else:
                        # Delete sample
                        nonz_idx = samples.nonzero()[0]
                        samples.put(nonz_idx[sample_idx], 0)
            # Refresh the statistics for the shrunken tricluster.
            self._compute_MSR(chroms, genes, samples)
        return chroms, genes, samples
    def _multiple_node_deletion(self, chroms, genes, samples):
        """
        The multiple node deletion routine of the algorithm.

        In each round, removes every entry whose axis MSR exceeds
        l * overall MSR — but only on axes still larger than their deletion
        cutoff — until the overall MSR drops to delta or a round deletes
        nothing (the arrays are modified in place).
        Parameters
        ----------
        chroms : ndarray
            Contains 1 for a chromosome pair that belongs to the tricluster
            currently examined, 0 otherwise.
        genes : ndarray
            Contains 1 for a gene that belongs to the tricluster currently
            examined, 0 otherwise.
        samples : ndarray
            Contains 1 for a sample that belongs to the tricluster currently
            examined, 0 otherwise.
        Returns
        -------
        chroms : ndarray
            Contains 1 for a chromosome pair that belongs to the tricluster
            examined, 0 otherwise.
        genes : ndarray
            Contains 1 for a gene that belongs to the tricluster examined,
            0 otherwise.
        samples : ndarray
            Contains 1 for a sample that belongs to the tricluster examined,
            0 otherwise.
        """
        self._compute_MSR(chroms, genes, samples)
        while (self.MSR > self.delta):
            deleted = 0
            with warnings.catch_warnings():  # We expect mean of NaNs here
                warnings.simplefilter("ignore", category=RuntimeWarning)
                # Only prune an axis while it still has more entries than
                # its cutoff; the boolean masks select offenders among the
                # surviving (nonzero) entries.
                if (self.n_chroms > self.chrom_cutoff):
                    chroms_to_del = self.MSR_chrom > (self.l * self.MSR)
                    nonz_idx = chroms.nonzero()[0]
                    if (chroms_to_del.any()):
                        deleted = 1
                        chroms.put(nonz_idx[chroms_to_del], 0)
                if (self.n_genes > self.gene_cutoff):
                    genes_to_del = self.MSR_gene > (self.l * self.MSR)
                    nonz_idx = genes.nonzero()[0]
                    if (genes_to_del.any()):
                        deleted = 1
                        genes.put(nonz_idx[genes_to_del], 0)
                if (self.n_samples > self.sample_cutoff):
                    samples_to_del = self.MSR_sample > (self.l * self.MSR)
                    nonz_idx = samples.nonzero()[0]
                    if (samples_to_del.any()):
                        deleted = 1
                        samples.put(nonz_idx[samples_to_del], 0)
            # Nothing was removed this round: stop so the loop cannot spin
            # forever on an unchanged tricluster.
            if (not deleted):
                break
            self._compute_MSR(chroms, genes, samples)
        return chroms, genes, samples
def _node_addition(self, chroms, genes, samples):
"""
The single node addition routine of the algorithm.
Parameters
----------
chroms : ndarray
Contains 1 for a chromosome pair that belongs to the tricluster
currently examined, 0 otherwise.
genes : ndarray
Contains 1 for a gene that belongs to the tricluster currently
examined, 0 otherwise.
samples : ndarray
Contains 1 for a sample that belongs to the tricluster currently
examined, 0 otherwise.
Returns
-------
chroms : ndarray
Contains 1 for a chromosome pair that belongs to the tricluster
examined, 0 otherwise.
genes : ndarray
Contains 1 for a gene that belongs to the tricluster examined,
0 otherwise.
samples : ndarray
Contains 1 | |
None
Number between 0 and 1. The amplitude of the flux ramp. If None,
doesn't change what's currently being used.
lms_freq_hz : float or None, optional, default None
The tracking frequency in Hz. If None,
doesn't change what's currently being used.
reset_rate_khz : float or None, optional, default None
The flux ramp reset rate in kHz. If None,
doesn't change what's currently being used.
feedback_start_frac : float or None, optional, default None
What fraction of the flux ramp to skip before
feedback. Float between 0 and 1. If None,
doesn't change what's currently being used.
feedback_end_frac : float or None, optional, default None
What fraction of the flux ramp to skip at the end of
feedback. Float between 0 and 1. If None,
doesn't change what's currently being used.
lms_enable1 : bool, optional, default None
Whether to use the first harmonic for tracking. If None,
doesn't change what's currently being used.
lms_enable2 : bool or None, optional, default None
Whether to use the second harmonic for tracking. If None,
doesn't change what's currently being used.
lms_enable3 : bool or None, optional, default None
Whether to use the third harmonic for tracking. If None,
doesn't change what's currently being used.
lms_gain : int or None, optional, default None
Tracking loop gain. If None, doesn't change what's
currently being used.
\**kwargs
Arbitrary keyword arguments. Passed to some register sets
and gets.
"""
self.log(f'Checking lock on band {band}')
if reset_rate_khz is None:
# reset_rate_khz = self.reset_rate_khz
reset_rate_khz = self.get_flux_ramp_freq()
if fraction_full_scale is None:
# fraction_full_scale = self.fraction_full_scale
fraction_full_scale = self.get_fraction_full_scale()
if lms_freq_hz is None:
# lms_freq_hz = self.lms_freq_hz[band]
lms_freq_hz = self.get_lms_freq_hz(band)
# Retrieve LMS enable status if not provided
if lms_enable1 is None:
lms_enable1 = self.get_lms_enable1(band)
if lms_enable2 is None:
lms_enable2 = self.get_lms_enable2(band)
if lms_enable3 is None:
lms_enable3 = self.get_lms_enable3(band)
if lms_gain is None:
lms_gain = self.get_lms_gain(band)
# Get feedback values
if feedback_start_frac is None:
feedback_start_frac = self._feedback_to_feedback_frac(band,
self.get_feedback_start(band))
if feedback_end_frac is None:
feedback_end_frac = self._feedback_to_feedback_frac(band,
self.get_feedback_end(band))
channels = self.which_on(band)
n_chan = len(channels)
self.log(f'Currently {n_chan} channels on')
# Tracking setup returns information on all channels in a band
f, df, sync = self.tracking_setup(band, make_plot=make_plot,
flux_ramp=flux_ramp, fraction_full_scale=fraction_full_scale,
lms_freq_hz=lms_freq_hz, reset_rate_khz=reset_rate_khz,
feedback_start_frac=feedback_start_frac,
feedback_end_frac=feedback_end_frac, lms_enable1=lms_enable1,
lms_enable2=lms_enable2, lms_enable3=lms_enable3,
lms_gain=lms_gain)
high_cut = np.array([])
low_cut = np.array([])
df_cut = np.array([])
# Make cuts
for ch in channels:
f_chan = f[:,ch]
f_span = np.max(f_chan) - np.min(f_chan)
df_rms = np.std(df[:,ch])
if f_span > f_max:
self.set_amplitude_scale_channel(band, ch, 0, **kwargs)
high_cut = np.append(high_cut, ch)
elif f_span < f_min:
self.set_amplitude_scale_channel(band, ch, 0, **kwargs)
low_cut = np.append(low_cut, ch)
elif df_rms > df_max:
self.set_amplitude_scale_channel(band, ch, 0, **kwargs)
df_cut = np.append(df_cut, ch)
chan_after = self.which_on(band)
self.log(f'High cut channels {high_cut}')
self.log(f'Low cut channels {low_cut}')
self.log(f'df cut channels {df_cut}')
self.log(f'Good channels {chan_after}')
self.log(f'High cut count: {high_cut}')
self.log(f'Low cut count: {low_cut}')
self.log(f'df cut count: {df_cut}')
self.log(f'Started with {n_chan}. Now {len(chan_after)}')
# Store the data in freq_resp
timestamp = self.get_timestamp(as_int=True)
self.freq_resp[band]['lock_status'][timestamp] = {
'action' : 'check_lock',
'flux_ramp': flux_ramp,
'f_min' : f_min,
'f_max' : f_max,
'df_max' : df_max,
'high_cut' : high_cut,
'low_cut' : low_cut,
'channels_before' : channels,
'channels_after' : chan_after
}
@set_action()
def check_lock_flux_ramp_off(self, band,df_max=.03,
make_plot=False, **kwargs):
"""
Simple wrapper function for check_lock with the flux ramp off
"""
self.check_lock(band, f_min=0., f_max=np.inf, df_max=df_max,
make_plot=make_plot, flux_ramp=False, **kwargs)
@set_action()
def find_freq(self, band, start_freq=-250, stop_freq=250, subband=None,
        tone_power=None, n_read=2, make_plot=False, save_plot=True,
        plotname_append='', window=50, rolling_med=True,
        make_subband_plot=False, show_plot=False, grad_cut=.05,
        amp_cut=.25, pad=2, min_gap=2):
    '''
    Finds the resonances in a band (and specified subbands)

    Args
    ----
    band : int
        The band to search.
    start_freq : float, optional, default -250
        The scan start frequency in MHz (from band center)
    stop_freq : float, optional, default 250
        The scan stop frequency in MHz (from band center)
    subband : deprecated, use start_freq/stop_freq.
        numpy.ndarray of int or None, optional, default None
        An int array for the subbands. If None, set to all
        processed subbands =numpy.arange(13,115).
        Takes precedent over start_freq/stop_freq.
    tone_power : int or None, optional, default None
        The drive amplitude. If None, takes from cfg.
    n_read : int, optional, default 2
        The number sweeps to do per subband.
    make_plot : bool, optional, default False
        Make the plot frequency sweep.
    save_plot : bool, optional, default True
        Save the plot.
    plotname_append : str, optional, default ''
        Appended to the default plot filename.
    window : int, optional, default 50
        The width of the rolling median window.
    rolling_med : bool, optional, default True
        Whether to iterate on a rolling median or just the median
        of the whole sample.
    make_subband_plot : bool, optional, default False
        Whether to make a plot per subband during peak finding.
    show_plot : bool, optional, default False
        Whether to show the sweep plot (only used if make_plot=True).
    grad_cut : float, optional, default 0.05
        The value of the gradient of phase to look for
        resonances.
    amp_cut : float, optional, default 0.25
        The fractional distance from the median value to decide
        whether there is a resonance.
    pad : int, optional, default 2
        Number of samples to pad on either side of a resonance
        search window
    min_gap : int, optional, default 2
        Minimum number of samples between resonances.

    Returns
    -------
    f : numpy.ndarray
        The frequencies swept.
    resp : numpy.ndarray
        The complex response at each swept frequency.
    '''
    band_center = self.get_band_center_mhz(band)
    if subband is None:
        # Convert the requested frequency range into a subband range.
        start_subband = self.freq_to_subband(band, band_center + start_freq)[0]
        stop_subband = self.freq_to_subband(band, band_center + stop_freq)[0]
        step = 1
        if stop_subband < start_subband:
            step = -1
        subband = np.arange(start_subband, stop_subband+1, step)
    else:
        # Deprecated path: derive the frequency range from the subbands.
        sbc = self.get_subband_centers(band)[1]
        start_freq = sbc[subband[0]]
        stop_freq = sbc[subband[-1]]

    # Turn off all tones in this band first. May want to make
    # this only turn off tones in each sub-band before sweeping,
    # instead?
    self.band_off(band)

    if tone_power is None:
        tone_power = self._amplitude_scale[band]
        self.log('No tone_power given. Using value in config ' +
                 f'file: {tone_power}')

    self.log(f'Sweeping across frequencies {start_freq + band_center}MHz to {stop_freq + band_center}MHz')
    f, resp = self.full_band_ampl_sweep(band, subband, tone_power, n_read)

    timestamp = self.get_timestamp()

    # Save data
    save_name = '{}_amp_sweep_{}.txt'

    path = os.path.join(self.output_dir, save_name.format(timestamp, 'freq'))
    np.savetxt(path, f)
    self.pub.register_file(path, 'sweep_response', format='txt')

    path = os.path.join(self.output_dir, save_name.format(timestamp, 'resp'))
    np.savetxt(path, resp)
    self.pub.register_file(path, 'sweep_response', format='txt')

    # Place in dictionary - dictionary declared in smurf_control.
    # Capture any previous find_freq results *before* overwriting them so
    # that the timestamp history accumulates across calls. (Previously the
    # 'timestamp' check ran against the freshly-emptied dict — so it was
    # always false — and the dead branch wrote to the wrong key.)
    prev_find_freq = self.freq_resp[band].get('find_freq', {})
    self.freq_resp[band]['find_freq'] = {}
    self.freq_resp[band]['find_freq']['subband'] = subband
    self.freq_resp[band]['find_freq']['f'] = f
    self.freq_resp[band]['find_freq']['resp'] = resp
    if 'timestamp' in prev_find_freq:
        self.freq_resp[band]['find_freq']['timestamp'] = \
            np.append(prev_find_freq['timestamp'], timestamp)
    else:
        self.freq_resp[band]['find_freq']['timestamp'] = np.array([timestamp])

    # Find resonator peaks
    res_freq = self.find_all_peak(self.freq_resp[band]['find_freq']['f'],
        self.freq_resp[band]['find_freq']['resp'], subband,
        make_plot=make_plot, plotname_append=plotname_append, band=band,
        rolling_med=rolling_med, window=window,
        make_subband_plot=make_subband_plot, grad_cut=grad_cut,
        amp_cut=amp_cut, pad=pad, min_gap=min_gap)
    self.freq_resp[band]['find_freq']['resonance'] = res_freq

    # Save resonances
    path = os.path.join(self.output_dir,
        save_name.format(timestamp, 'resonance'))
    np.savetxt(path, self.freq_resp[band]['find_freq']['resonance'])
    self.pub.register_file(path, 'resonances', format='txt')

    # Call plotting
    if make_plot:
        self.plot_find_freq(self.freq_resp[band]['find_freq']['f'],
            self.freq_resp[band]['find_freq']['resp'],
            subband=np.arange(self.get_number_sub_bands(band)),
            save_plot=save_plot,
            show_plot=show_plot,
            save_name=save_name.replace('.txt', plotname_append +
                '.png').format(timestamp, band))

    return f, resp
@set_action()
def plot_find_freq(self, f=None, resp=None, subband=None, filename=None,
        save_plot=True, save_name='amp_sweep.png', show_plot=False):
    '''
    Plots the response of the frequency sweep. Must input f and
    resp, or give a path to a text file containing the data for
    offline plotting. To do: Add ability to use timestamp and
    multiple plots.

    Args
    ----
    f : float array or None, optional, default None
        An array of frequency data.
    resp : complex array or None, optional, default None
        An array of find_freq response values.
    subband : int array or None, optional, default None
        A list of subbands that are scanned.
    filename : str or None, optional, default None
        The full path to the file where the find_freq is stored.
        Takes precedence over f/resp when given.
    save_plot : bool, optional, default True
        Save the plot.
    save_name : str, optional, default 'amp_sweep.png'
        What to name the plot.
    show_plot : bool, optional, default False
        Whether to show the plot.
    '''
    if subband is None:
        subband = np.arange(self.get_number_sub_bands())
    subband = np.asarray(subband)

    # Resolve the data source: a file takes precedence, otherwise both
    # f and resp must have been supplied.
    if filename is not None:
        f, resp = np.load(filename)
    elif f is None or resp is None:
        self.log('No input data or file given. Nothing to plot.')
        return

    cm = plt.cm.get_cmap('viridis')
    plt.figure(figsize=(10, 4))

    # Alternate colors between adjacent subbands so boundaries are visible.
    n_sb = len(subband)
    for idx, sb in enumerate(subband):
        color = cm(float(idx)/n_sb/2. + .5*(idx % 2))
        plt.plot(f[sb, :], np.abs(resp[sb, :]), '.', markersize=4,
                 color=color)

    plt.title("find_freq response")
    plt.xlabel("Frequency offset (MHz)")
    plt.ylabel("Normalized Amplitude")

    if save_plot:
        path = os.path.join(self.plot_dir, save_name)
        plt.savefig(path, bbox_inches='tight')
        self.pub.register_file(path, 'response', plot=True)

    if show_plot:
        plt.show()
    else:
        plt.close()
@set_action()
def full_band_ampl_sweep(self, band, subband, tone_power, n_read, n_step=31):
"""sweep a full band in amplitude, for finding frequencies
Args
----
band : int
bandNo (500MHz band).
subband : int
Which subbands to sweep.
tone_power : int
Drive power.
n_read : int
Numbers | |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 18 18:15:50 2016
@name: Mixed MultiNomial Logit
@author: <NAME>
@summary: Contains functions necessary for estimating mixed multinomial logit
models (with the help of the "base_multinomial_cm.py" file).
Version 1 only works for MNL kernels and only for mixing of index
coefficients.
General References
------------------
Train, K., 2009. Discrete Choice Models With Simulation. 2 ed., Cambridge
University Press, New York, NY, USA.
"""
from __future__ import absolute_import
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from . import base_multinomial_cm_v2 as base_mcm
from . import choice_calcs as cc
from . import mixed_logit_calcs as mlc
from .choice_tools import get_dataframe_from_data
from .choice_tools import create_design_matrix
from .choice_tools import create_long_form_mappings
from .display_names import model_type_to_display_name
from .estimation import EstimationObj
from .estimation import estimate
# Alias necessary functions for model estimation
# (kernel probabilities come from the shared choice_calcs module; the
# mixing-specific quantities come from mixed_logit_calcs)
general_calc_probabilities = cc.calc_probabilities
general_sequence_probs = mlc.calc_choice_sequence_probs
general_log_likelihood = mlc.calc_mixed_log_likelihood
general_gradient = mlc.calc_mixed_logit_gradient
general_bhhh = mlc.calc_bhhh_hessian_approximation_mixed_logit

# Warning issued when shape arguments are passed: the Mixed MNL kernel has
# no shape parameters, so such arguments are silently ignored.
_msg_1 = "The Mixed MNL Model has no shape parameters. "
_msg_2 = "shape_names and shape_ref_pos will be ignored if passed."
_shape_ignore_msg = _msg_1 + _msg_2

# Create a warning string that will be issued if ridge regression is performed.
_msg_3 = "NOTE: An L2-penalized regression is being performed. The "
_msg_4 = "reported standard errors and robust standard errors "
_msg_5 = "***WILL BE INCORRECT***."
_ridge_warning_msg = _msg_3 + _msg_4 + _msg_5
def split_param_vec(beta, return_all_types=False, *args, **kwargs):
    """
    Split a parameter vector into its component parameter groups. The mixed
    MNL model has only index coefficients, so every other group is `None`;
    this function exists solely for interface compatibility with the other
    choice model files.

    Parameters
    ----------
    beta : 1D numpy array.
        All elements should by ints, floats, or longs. Should have 1 element
        for each utility coefficient being estimated (i.e. num_features).
    return_all_types : bool, optional.
        Determines whether or not a tuple of 4 elements will be returned (with
        one element for the nest, shape, intercept, and index parameters for
        this model). If False, a tuple of 3 elements will be returned.

    Returns
    -------
    tuple.
        `(None, None, beta)` when `return_all_types == False`, otherwise
        `(None, None, None, beta)`. The `None` entries stand in for the
        nest, shape, and outside intercept parameters that this model
        does not have.
    """
    # Nest, shape, and intercept parameters do not exist for mixed MNL.
    nests = shapes = intercepts = None
    if return_all_types:
        return nests, shapes, intercepts, beta
    return shapes, intercepts, beta
def mnl_utility_transform(sys_utility_array, *args, **kwargs):
    """
    Ensure the systematic utilities are 2D, adding a trailing axis if needed.

    Parameters
    ----------
    sys_utility_array : ndarray.
        Should have 1D or 2D. Should have been created by the dot product of a
        design matrix and an array of index coefficients.

    Returns
    -------
    systematic_utilities : 2D ndarray.
        `sys_utility_array` itself when it is already 2D; otherwise
        `sys_utility_array[:, None]`.
    """
    # Promote a 1D utility vector to a single-column 2D array.
    if sys_utility_array.ndim == 1:
        return sys_utility_array[:, np.newaxis]
    return sys_utility_array
def check_length_of_init_values(design_3d, init_values):
    """
    Ensures that the initial values are of the correct length, given the design
    matrix that they will be dot-producted with. Raises a ValueError if that is
    not the case, and provides a useful error message to users.

    Parameters
    ----------
    init_values : 1D ndarray.
        1D numpy array of the initial values to start the optimization process
        with. There should be one value for each index coefficient being
        estimated.
    design_3d : 2D ndarray.
        2D numpy array with one row per observation per available alternative.
        There should be one column per index coefficient being estimated. All
        elements should be ints, floats, or longs.

    Returns
    -------
    None.
    """
    expected_dim = design_3d.shape[2]
    if init_values.shape[0] != expected_dim:
        # One initial value is required per column of the design matrix.
        raise ValueError("The initial values are of the wrong dimension. " +
                         "They should be of dimension {}".format(expected_dim))
    return None
def add_mixl_specific_results_to_estimation_res(estimator, results_dict):
    """
    Stores particular items in the results dictionary that are unique to mixed
    logit-type models. In particular, this function calculates and adds
    `simulated_sequence_probs` and `expanded_sequence_probs` to the results
    dictionary.

    Parameters
    ----------
    estimator : an instance of the MixedEstimator class.
        Should contain a `choice_vector` attribute that is a 1D ndarray
        representing the choices made for this model's dataset. Should also
        contain a `rows_to_mixers` attribute that maps each row of the long
        format data to a unit of observation that the mixing is being performed
        over.
    results_dict : dict.
        This dictionary should be the dictionary returned from
        scipy.optimize.minimize. In particular, it should have the following
        `long_probs` key.

    Returns
    -------
    results_dict.
    """
    # Compute the probability of each observed choice sequence, given the
    # simulated draws.
    prob_results = mlc.calc_choice_sequence_probs(results_dict["long_probs"],
                                                  estimator.choice_vector,
                                                  estimator.rows_to_mixers,
                                                  return_type='all')

    # Record both the per-mixer sequence probabilities and their expanded
    # (per-draw) counterparts.
    results_dict.update(simulated_sequence_probs=prob_results[0],
                        expanded_sequence_probs=prob_results[1])

    return results_dict
class MixedEstimator(EstimationObj):
"""
Estimation object for the Mixed Logit Model.
Parameters
----------
model_obj : a pylogit.base_multinomial_cm_v2.MNDC_Model instance.
Should contain the following attributes:
- alt_IDs
- choices
- design
- intercept_ref_position
- shape_ref_position
- utility_transform
- design_3d
mapping_res : dict.
Should contain the scipy sparse matrices that map the rows of the long
format dataframe to various other objects such as the available
alternatives, the unique observations, etc. The keys that it must have
are `['rows_to_obs', 'rows_to_alts', 'chosen_row_to_obs']`
ridge : int, float, long, or None.
Determines whether or not ridge regression is performed. If a
scalar is passed, then that scalar determines the ridge penalty for
the optimization. The scalar should be greater than or equal to
zero..
zero_vector : 1D ndarray.
Determines what is viewed as a "null" set of parameters. It is
explicitly passed because some parameters (e.g. parameters that must be
greater than zero) have their null values at values other than zero.
split_params : callable.
Should take a vector of parameters, `mapping_res['rows_to_alts']`, and
model_obj.design as arguments. Should return a tuple containing
separate arrays for the model's shape, outside intercept, and index
coefficients. For each of these arrays, if this model does not contain
the particular type of parameter, the callable should place a `None` in
its place in the tuple.
constrained_pos : list or None, optional.
Denotes the positions of the array of estimated parameters that are
not to change from their initial values. If a list is passed, the
elements are to be integers where no such integer is greater than
`init_values.size.` Default == None.
weights : 1D ndarray or None, optional.
Allows for the calculation of weighted log-likelihoods. The weights can
represent various things. In stratified samples, the weights may be
the proportion of the observations in a given strata for a sample in
relation to the proportion of observations in that strata in the
population. In latent class models, the weights may be the probability
of being a particular class.
"""
def __init__(self,
             model_obj,
             mapping_dict,
             ridge,
             zero_vector,
             split_params,
             constrained_pos=None,
             weights=None):
    """Initialize the base estimation object and attach the model's 3D
    design matrix, which mixed logit needs for per-draw calculations."""
    super(MixedEstimator, self).__init__(model_obj,
                                         mapping_dict,
                                         ridge,
                                         zero_vector,
                                         split_params,
                                         constrained_pos=constrained_pos,
                                         weights=weights)

    # Keep a direct reference to the 3d design matrix on this object.
    self.design_3d = model_obj.design_3d
def convenience_split_params(self, params, return_all_types=False):
    """
    Splits parameter vector into shape, intercept, and index parameters.

    Parameters
    ----------
    params : 1D ndarray.
        The array of parameters being estimated or used in calculations.
    return_all_types : bool, optional.
        Determines whether or not a tuple of 4 elements will be returned
        (with one element for the nest, shape, intercept, and index
        parameters for this model). If False, a tuple of 3 elements will
        be returned with one element for the shape, intercept, and index
        parameters.

    Returns
    -------
    tuple. Will have 4 or 3 elements based on `return_all_types`.
    """
    # Delegate to the splitting callable supplied at construction time.
    return self.split_params(params, return_all_types=return_all_types)
def check_length_of_initial_values(self, init_values):
    """
    Ensures that the initial values are of the correct length.

    Parameters
    ----------
    init_values : 1D ndarray.
        The initial values for the optimization; should have one element
        per column of `self.design_3d`.

    Returns
    -------
    None. Raises a ValueError (via the module-level helper) when the
    length does not match the design matrix.
    """
    return check_length_of_init_values(self.design_3d, init_values)
def convenience_calc_probs(self, params):
    """
    Calculates the probabilities of the chosen alternative, and the long
    format probabilities for this model and dataset.
    """
    # Only the index coefficients matter; mixed MNL has no shape or
    # intercept parameters.
    _, _, betas = self.convenience_split_params(params)

    return general_calc_probabilities(betas,
                                      self.design_3d,
                                      self.alt_id_vector,
                                      self.rows_to_obs,
                                      self.rows_to_alts,
                                      self.utility_transform,
                                      chosen_row_to_obs=self.chosen_row_to_obs,
                                      return_long_probs=True)
def convenience_calc_log_likelihood(self, params):
    """
    Calculates the log-likelihood for this model and dataset.
    """
    # Only the index coefficients matter; mixed MNL has no shape or
    # intercept parameters.
    _, _, betas = self.convenience_split_params(params)

    return general_log_likelihood(betas,
                                  self.design_3d,
                                  self.alt_id_vector,
                                  self.rows_to_obs,
                                  self.rows_to_alts,
                                  self.rows_to_mixers,
                                  self.choice_vector,
                                  self.utility_transform,
                                  ridge=self.ridge,
                                  weights=self.weights)
def convenience_calc_gradient(self, params):
"""
Calculates the gradient of the log-likelihood for this model / dataset.
"""
shapes, intercepts, betas = self.convenience_split_params(params)
args = [betas,
self.design_3d,
self.alt_id_vector,
self.rows_to_obs,
self.rows_to_alts,
| |
# -*- coding: utf-8 -*-
"""
SSTDR_USB.py
Created on Wed Oct 23 13:50:16 2019
@author: Cody
"""
#should launch USBPcap with arguments based on command line input.
#uses a PcapPacketReceiver to process output from USBPcap.
#notices waveforms that are transmitted and visualizes them.
#waits for user to quit, then tells receiver to halt.
"""
DEPENDENCIES
- USBPcap installation
- libusb-1.0.dll (for pyusb. needs to be found in system PATH. solves "No backend available" from pyusb)
- matplotlib (in conda)
- numpy (in conda)
- curses (in pip, use "windows-curses" on windows)
- pyformulas (in pip)
- pyaudio (required by pyformulas, in conda)
- portaudio (required by pyformulas, in conda)
- pygame (in pip)
- pyyaml (in conda)
- pyusb (in pip)
- scipy (conda)
"""
######################################################
## IMPORTS ##
######################################################
#built in python modules
import sys
import os
import subprocess
from concurrent.futures import ThreadPoolExecutor
import traceback
import threading
import time
from collections import deque
#python libraries
import numpy as np
import curses
import matplotlib.pyplot as plt
import pyformulas as pf
import pygame
import yaml
import usb
import datetime as dt
import re
#homegrown code
from PcapPacketReceiver import *
import fault_detection
import ui_elements as ui
######################################################
## CONSTANTS ##
######################################################
# Feature toggles for the monitor application.
USE_CURSES = True
VERIFY_WAVEFORMS = True
DEBUG_LOG = True
DEBUG_VERIFICATION = False
VERBOSE_LOGGING = False
DEBUG_LAYOUT = True
# Fault-detection algorithm selection; alternatives kept for quick switching.
#FAULT_DETECTION_METHOD = fault_detection.METHOD_NONE
FAULT_DETECTION_METHOD = fault_detection.METHOD_BLS_PEAKS
#FAULT_DETECTION_METHOD = fault_detection.METHOD_LOW_PASS_PEAKS
#FAULT_DETECTION_METHOD = fault_detection.METHOD_BLS_DEVIATION_CORRECTION

# Pygame window geometry (pixels): terminal strip at the bottom, visual
# panel-layout area above it.
SCREEN_SIZE = SCREEN_X, SCREEN_Y = 800, 480
TERMINAL_Y = 100
VISUAL_Y = SCREEN_Y - TERMINAL_Y
BORDER_WIDTH = 3
BORDER_PADDING = 2

# RGB color palette.
COLOR_WHITE = (225, 225, 225)
COLOR_GREY = (128, 128, 128)
COLOR_ORANGE = (255, 140, 0)
COLOR_BLUE = ( 0, 0, 200)
COLOR_BLACK = ( 10, 10, 10)
COLOR_RED = ( 200, 0, 0)
BG_COLOR = COLOR_GREY
TERMINAL_COLOR = COLOR_WHITE
WIRE_COLOR = COLOR_BLUE
TEXT_COLOR = COLOR_BLACK
CONNECTOR_COLOR = COLOR_RED

# Panel-layout rendering parameters (scaling, padding, line widths).
PANEL_SCALE = 1/15
PANEL_PADDING = (50, 50)
WIRE_WIDTH = 2
PANEL_SCREEN_X_RATIO = 1/2+1/8
CONNECTOR_SIZE = 5
CONNECTOR_WIDTH = WIRE_WIDTH

######################################################
##               DEVICE SUPPORT                     ##
######################################################
# Identifiers for the two supported SSTDR hardware classes; the class
# determines how waveform data is extracted from the USB stream.
DEVICE_PROTOTYPE = 0 #such as the UF lab's PCB SSTDR
DEVICE_COMMERCIAL = 1 #such as the commercial devices available at the U and livewire
######################################################
## STATE DEFINITION ##
######################################################
#struct for variables controlled by buttons, that thus need to be passable to functions out of main scope
class MonitorState:
    """Mutable container for logging/session state.

    Holds the variables that on-screen buttons toggle, so that callbacks
    outside of main()'s scope can read and mutate them through a single
    shared object.
    """

    def __init__(self):
        # Whether the system is currently recording data.
        self.logging = False
        # Number of measurements to take before stopping logging
        # (0 means never stop).
        self.measurement_counter = 0
        # Current log number; groups related measurements under one id.
        self.log_number = 0
        # Current session number; increments each time the software is
        # launched and pointed at the same output file.
        self.session_number = 0
        # Whether the output file already contains a header row (if not,
        # the header is written along with the first data row).
        self.file_has_header = False
        # Timestamps bracketing the logging schedule.
        self.last_log_time = dt.datetime.now()
        self.next_log_time = dt.datetime.now()
        # Detected SSTDR hardware class (DEVICE_PROTOTYPE/DEVICE_COMMERCIAL),
        # or None until a device is found.
        self.device_class = None
def main(cscreen = None):
######################################################
## STARTUP ##
######################################################
#default values
arg_filter = None
arg_address = None
input_path = "NREL_sequence_canadian_1.csv"
file_mode = False
output_path = "SSTDR_waveforms.csv"
yaml_path = 'default.yaml'
time_interval = -1
debug_log_path = 'log.txt'
baseline_indices = [0]
terminal_indices = [0]
#read cmd line arguments
valid_args = ['-yaml', 'y', '-filter', '-f', '-address', '-a', '-file', '-out', '-o', '-curses', '-c', '-no-curses', '-nc', '-interval', '-i', '-t', '-bli','-tli','-ti']
args = {}
skip = False
for i,arg in enumerate(sys.argv):
if skip:
skip = False
continue #only look at args in loop
if arg in valid_args:
skip = True #skip next word; we use it as a value here
if (i+1 < len(sys.argv)):
value = sys.argv[i+1]
if arg in ['-yaml', '-y']:
yaml_path = value
elif arg in ['-filter', '-f']:
arg_filter = int(value)
elif arg in ['-address', '-a']:
arg_address = int(value)
elif arg in ['-file']:
file_mode = True
input_path = value
elif arg in ['-bli']:
baseline_indices = [int(x) for x in value.split(',')]
elif arg in ['-tli', '-ti']:
terminal_indices = [int(x) for x in value.split(',')]
elif arg in ['-out', '-o']:
output_path = value
elif arg in ['-interval', '-i', '-t']:
try:
time_interval = int(value)
except:
if value in ['h', 'hour']:
time_interval = 3600
elif value in ['m', 'min', 'minute']:
time_interval = 60
elif value in ['s', 'second']:
time_interval = 1
#elif arg in ['-curses', '-c']:
# USE_CURSES = True
# skip = False
#elif arg in ['-no-curses', '-nc']:
# USE_CURSES = False
# skip = False
#report session start & info in log
if DEBUG_LOG:
debug_log(debug_log_path, "===========================================================================")
debug_log(debug_log_path, "STARTING SESSION")
debug_log(debug_log_path, "===========================================================================")
debug_log(debug_log_path, "Yaml path: "+yaml_path)
debug_log(debug_log_path, "Data input file: "+("N/A" if not file_mode else input_path))
debug_log(debug_log_path, "Output file: "+output_path)
debug_log(debug_log_path, "Time interval: "+str(time_interval))
#prepare usb sniffing
#create logging state
state = MonitorState()
if not file_mode and (arg_filter is None or arg_address is None):
#auto-detect an SSTDR device. depending on the device, determine the device class (which influences data extraction)
#try to find prototype device
sstdr_device = usb.core.find(idVendor=0x067b, idProduct=0x2303)
if sstdr_device is not None:
state.device_class = DEVICE_PROTOTYPE
else:
#try to find commercial device
sstdr_device = usb.core.find(idVendor=7214,idProduct=5)# constants for devices tested with Sam Kingston. (WILMA & ARNOLD)
if sstdr_device is not None:
state.device_class = DEVICE_COMMERCIAL
if sstdr_device is None:
print("Error: Could not automatically find SSTDR device. Either restart it or provide filter/address manually.")
return
arg_filter = sstdr_device.bus
arg_address = sstdr_device.address
usb_path = "C:\\Program Files\\USBPcap\\USBPcapCMD.exe"
usb_args = [usb_path, "-d", "\\\\.\\USBPcap" + str(arg_filter), "--devices", str(arg_address), "-o", "-"]
if DEBUG_LOG:
debug_log(debug_log_path, "Opened device: Filter "+str(arg_filter)+", Address "+str(arg_address))
debug_log(debug_log_path, "Device Class: "+str(state.device_class))
#prepare output file for logging
with open(output_path, "a+") as out_f:
out_f.seek(0,0)
first_char = out_f.read(1)
if (first_char == ''):
#file did not exist or is empty. write header row; set session/log index to 0
state.file_has_header = False
state.session_number = 0
state.log_number = 0
else:
#file was not empty. jump almost to end, read last line, extract session index
#"read up until start of last line" code from S.O. user Trasp: https://stackoverflow.com/questions/3346430/what-is-the-most-efficient-way-to-get-first-and-last-line-of-a-text-file/3346788
with open(output_path, "rb") as f:
f.seek(-2, os.SEEK_END) # Jump to the second last byte.
while f.read(1) != b"\n": # Until EOL is found...
f.seek(-2, os.SEEK_CUR) # ...jump back the read byte plus one more.
last = f.readline() # Read last line as bytes.
state.file_has_header = True
state.session_number = 1+int(chr(int.from_bytes(last.split(b',')[0],'little'))) #assumes little endian, and that session index is present in column 0 (as will be standard in the future)
state.log_number = 0
#set up scanning interface in curses (cscreen = curses screen)
print("Opening scanner interface...")
if not(cscreen is None):
cscreen.clear()
cscreen.nodelay(True)
if (file_mode):
cscreen.addstr(0,0,"Playing back input file: '" + input_path +"'...")
else:
cscreen.addstr(0,0,"Scanning on filter " + str(arg_filter) + ", address " + str(arg_address) + "...")
cscreen.addstr(1,0,"Press 'q' to stop.")
cscreen.addstr(3,0,"System OK.")
cscreen.refresh()
else:
print("Scanning on filter " + str(arg_filter) + ", address " + str(arg_address) + "...")
if (not file_mode):
#open USBPcap, throwing all output onto a pipe
usb_fd_r, usb_fd_w = os.pipe()
usbpcap_process = subprocess.Popen(usb_args, stdout=usb_fd_w)
#start receiving usbpcap output and organizing it into packets
usb_stream = os.fdopen(usb_fd_r, "rb")
#set up receiver to process raw USB bytestream
halt_threads = threading.Event()
receiver = PcapPacketReceiver(usb_stream, loop=True, halt_event=halt_threads)
#prepare deque for waveform visualization; only stores a few of the most recently received waveforms. appended entries cycle out old ones
#larger deque -> more maximum latency between visualization and actual system state
#smaller deque -> not sure why this would be a problem (something about losing information if packets aren't received constantly)
# NOTE(review): maxlen=1 keeps only the newest waveform; any packet arriving
# before the consumer reads is silently overwritten -- confirm this is intended.
wf_deque = deque(maxlen=1)
#prepare to visualize waveforms
fig = plt.figure()
plot_window = pf.screen(title='SSTDR Correlation Waveform')
######################################################
##                 PYGAME SETUP                     ##
######################################################
#initializing pygame
pygame.init()
pscreen = pygame.display.set_mode(SCREEN_SIZE)
pygame.display.set_caption("PV Fault Scanner")
#loading assets, preparing pre-baked surfaces
FONT_PATH = os.path.join("Assets", "Titillium-Regular.otf")
TERMINAL_FONT = pygame.font.Font(FONT_PATH, 40)
STATUS_FONT = pygame.font.Font(FONT_PATH, 20)
# panel image is scaled once up front by PANEL_SCALE so the draw loop can blit it directly
panel_surf = pygame.image.load(os.path.join("Assets", "PV_panel_CharlesMJames_CC.jpg"))
panel_surf = pygame.transform.scale(panel_surf, (int(panel_surf.get_width()*PANEL_SCALE), int(panel_surf.get_height()*PANEL_SCALE)))
panel_rect = panel_surf.get_rect()
#grass_surf = pygame.image.load(os.path.join("Assets", "grass.png"))
#grass_rect = grass_surf.get_rect()
hazard_surf = pygame.image.load(os.path.join("Assets", "hazard.png"))
hazard_rect = hazard_surf.get_rect()
# static background chrome (fill color, divider lines, status text) is baked
# into bg_surf once here so the main loop only has to blit one surface
bg_surf = pygame.Surface(pscreen.get_size())
bg_surf.convert()
bg_rect = bg_surf.get_rect()
bg_surf.fill(BG_COLOR)
# orange divider line above the visualization area at VISUAL_Y...
line_surf = pygame.Surface((SCREEN_X, BORDER_WIDTH))
line_surf.fill(COLOR_ORANGE)
line_rect = line_surf.get_rect()
line_rect.y = VISUAL_Y - BORDER_WIDTH - int(BORDER_PADDING/2)
bg_surf.blit(line_surf, line_rect)
# ...then the same surface is re-filled blue and blitted BORDER_PADDING lower
line_surf.fill(COLOR_BLUE)
line_rect.move_ip(0, BORDER_WIDTH + BORDER_PADDING)
bg_surf.blit(line_surf, line_rect)
# top-left status caption
text_surf = STATUS_FONT.render("Scanning at 24MHz...", True, COLOR_WHITE)
text_rect = text_surf.get_rect()
text_rect.move_ip(3,3)
bg_surf.blit(text_surf, text_rect)
# layout caption anchored just above the orange divider
text_surf = STATUS_FONT.render("Selected Array Layout: " + yaml_path, True, COLOR_WHITE)
text_rect = text_surf.get_rect()
text_rect.x = 3
text_rect.bottom = VISUAL_Y - BORDER_WIDTH - int(0.5*BORDER_PADDING) - 3
bg_surf.blit(text_surf, text_rect)
#load panel layout
panel_layout, connector_ds, panel_length = load_panel_layout(yaml_path)
panel_cols = panel_rows = 0
try:
N = len(connector_ds) #number of connectors; = (panel count)+1
P = N-1 #number of panels
H = | |
SimpleDate(2012, 5, 19, 12, 0, tz='CLT')
SimpleDate('2012-05-19 12:00:00.000000 CLT')
The `format` parameter describes the output format, used for display.
If `None`, then an ISO-8601-like format is used, unless the value is
parsed from a string, in which case the format used for parsing is
preserved.
:param year_or_auto: Either the year (if month and day also present) or a value that will be interpreted by type.
:param month: The month, if an explicit date is being given.
:param day: The day, if an explicit date is being given.
:param hour: The hour, if an explicit date is being given (default 0).
:param minute: The minute, if an explicit date is being given (default 0).
:param second: The second, if an explicit date is being given (default 0).
:param microsecond: Microseconds, if an explicit date is being given (default 0).
:param simple: An existing instance, which will be copied.
:param datetime: A `dt.datetime`.
:param date: A `dt.date` instance, which will be combined with `time` (default today).
:param ordinal: A Gregorian ordinal, which will be combined with `time`.
:param time: A `dt.time` instance, which will be combined with `date` or `ordinal` (default midnight).
:param timestamp: A Posix timestamp (aka Unix epoch).
:param tz: A time zone to use if none available in the date (`None` is local).
:param is_dst: Whether the time being processed is DST (used in some corner cases to get the timezone correct - see pytz docs).
:param country: A country code (or list of codes) (`None` means derive from `tz`).
:param quality: Controls how the timezone is selected from multiple values: HIGH requires a unique match; MEDIUM accepts ambiguity if all offsets are the same; LOW takes the first value found.
:param tz_factory: Used to convert `tz`, and also anything parsed from an input string, to a `dt.tzinfo` instance (default DEFAULT_TZ_FACTORY).
:param format: The format used for output (also used to parse input string if `date_parser` is `None`).
:param date_parser: Used to parse an input string (default DEFAULT_DATE_PARSER, combined with `format` if given).
:param equality: How do we compare values? See SimpleDate.EQUALITY. Default is DEFAULT_EQUALITY unless `simple` given (in which case it is copied).
:param unsafe: Take the first timezone found.
:param debug: If true, print a description of the logic followed.
:return: A Date instance, containing the given data.
'''
log = self._get_log(debug)
format = auto_invert(format, log)
# gentle reader, this may look like a huge, impenetrable block of
# code, but it's actually not doing anything clever - just many small
# steps to support the different ways the constructor can be called.
# if none of the other 'main' arguments (that define the datetime)
# are defined, then the `year_or_auto` parameter is treated as 'auto'
# (if it were a year, `month` and `day` are also required). so here we
# examine the type and set the implicit argument, erasing year_or_auto
# when we do so. in this way, at the end of this section, we are in a
# state 'as if' the constructor had been called with the correct
# parameter set. for example, if `year_or_auto` is a `dt.date`
# instance we will set `date`.
if test_all(is_none, month, day, hour, minute, second, microsecond,
simple, time, date, datetime, timestamp, ordinal):
log('Inferring auto argument')
if isinstance(year_or_auto, SimpleDate):
log('Found a DTime instance')
simple, year_or_auto = year_or_auto, None
# ordering important here as issubclass(datetime, date)
elif isinstance(year_or_auto, dt.datetime):
log('Found a datetime instance')
datetime, year_or_auto = year_or_auto, None
elif isinstance(year_or_auto, dt.date):
log('Found a date instance')
date, year_or_auto = year_or_auto, None
elif isinstance(year_or_auto, dt.time):
log('Found a time instance')
time, year_or_auto = year_or_auto, None
elif isinstance(year_or_auto, int) or isinstance(year_or_auto, float):
log('Found a numeric value, will use as Unix epoch')
timestamp, year_or_auto = year_or_auto, None
elif isinstance(year_or_auto, str):
# if we have a string, use `date_parser` to create a SimpleDate
# instance (passing `tz`, `format`, etc) and then clear
# everything else.
log('Found a string, will try to parse')
if date_parser is None:
if format:
log('Creating date parser with given format plus defaults')
date_parser = SimpleDateParser(always_tuple(format) + DEFAULT_FORMATS)
else:
log('Using default date parser')
date_parser = DEFAULT_DATE_PARSER
else:
log('Using given date parser')
datetime, read_fmt, write_fmt = date_parser.parse(year_or_auto, tz=tz, is_dst=is_dst, country=country, tz_factory=tz_factory, unsafe=unsafe, debug=debug)
year_or_auto, tz = None, None # clear tz so it's not re-checked later
# if someone supplied a single format, always use it for writes.
# but check whether we have the correct translated version.
format = single_format(format)
if format is None:
format = write_fmt
else:
if format == read_fmt:
format = write_fmt
else:
format = strip(format)
log('Format was not used to parse, so strip to {0}', format)
elif year_or_auto is not None:
raise SimpleDateError('Cannot convert {0!r} for year_or_auto', year_or_auto)
# so now the 'auto' parameter has been converted and we can address
# the different cases the constructor handles in turn.
# if all the date components are missing then we must process the
# more complex types (`date`, `time`, etc). in general, only one of
# those is supported at a time, but there are a couple of special
# case pairs (basically, combining dates and time) that we handle first
# by combining them to datetime and then deleting the original
# (similar to the way 'auto' was handled above).
if test_all(is_none, year_or_auto, month, day, hour, minute, second, microsecond):
# special case - convert `ordinal` to `date` and then fall through
# to next case below (combining with time).
if date is None and ordinal is not None:
date = dt.date.fromordinal(ordinal)
log('Converted ordinal {0} to date {1}', ordinal, date)
ordinal = None
# special case - combine date and/or time into datetime
if datetime is None and (date is not None or time is not None):
# the tricky part here is bootstrapping tz correctly. we start
# by making sure that `time` has a value.
if time is None:
# we know date is defined, so use a zero time in the
# datetime to bootstrap the tz
tzinfo = tz_factory.search(tz, datetime=dt.datetime.combine(date, dt.time()), is_dst=is_dst, country=country, unsafe=unsafe, debug=debug)
log('Have a date, but no time, so using midnight in {0}', tzinfo)
time = dt.time(tzinfo=tzinfo)
elif time.tzinfo is None:
# similarly, fix a naive time (TODO - we use today's UTC date - that may not be right?)
tzinfo = tz_factory.search(tz, datetime=dt.datetime.combine(dt.datetime.utcnow().date(), time), is_dst=is_dst, country=country, unsafe=unsafe, debug=debug)
log('Setting timezone for time to {0}', tzinfo)
time = time.replace(tzinfo=tzinfo)
# so now we have a time that is guaranteed to exist and have
# a valid tzinfo
if date is None:
log('Have a time, but no date, so using today')
date = dt.datetime.now(tz=time.tzinfo).date()
log('Combining date and time')
datetime = reapply_tzinfo(dt.datetime.combine(date, time), is_dst)
date, time = None, None
# move simple to datetime here so that we can check tz below
if simple is not None:
datetime = simple.datetime
log('Using datetime from simple: {0}', datetime)
if not format:
format = simple.format
log('Using format from simple: {0}', format)
simple = None
# with the special cases handled (and reduced to a single
# `datetime`) we should have only a single parameter remaining
# (all other combinations are unsupported).
multiple = names(2, is_not_none, simple=simple, time=time, date=date, datetime=datetime, epoch=timestamp)
if multiple:
args = ', '.join(multiple)
log('Too many, possibly contradicting, values: {0}', args)
raise SimpleDateError('Cannot specify ' + args + ' together')
# pick off the remaining parameters, one by one.
if datetime is not None and tz is not None:
# we need to check that a tzinfo that was implicit in other
# parameters is consistent with the explicit value
tz_factory.search(datetime.tzinfo, tz, datetime=datetime, is_dst=is_dst, country=country, unsafe=unsafe, debug=debug)
elif time is not None:
raise SimpleDateError('Inconsistent code: time should already have been converted')
elif date is not None:
raise SimpleDateError('Inconsistent code: date should already have been converted')
elif timestamp is not None:
log('Converting Unix epoch to | |
"""
Cubes
=====
Tools to deal with spectroscopic data cubes.
Some features in Cubes require additional packages:
* smoothing - requires agpy_\'s smooth and parallel_map routines
* `pyregion <git://github.com/astropy/pyregion.git>`_
The 'grunt work' is performed by the :py:mod:`cubes` module
"""
from __future__ import print_function
import time
import sys
import traceback
import numpy as np
import types
import copy
import itertools
from ..specwarnings import warn,PyspeckitWarning
import astropy
from astropy.io import fits
from astropy import log
from astropy import wcs
from astropy import units
from astropy.utils.console import ProgressBar
from six import iteritems, string_types
from functools import wraps
# import parent package
from .. import spectrum
from ..spectrum import smooth
from ..spectrum.units import (generate_xarr, SpectroscopicAxis,
SpectroscopicAxes)
from ..parallel_map import parallel_map
from ..spectrum import history
# import local things
from . import mapplot
from . import cubes
def not_for_cubes(func):
    """Decorator that warns that *func* acts only on the currently-selected
    spectrum (chosen via `set_spectrum` / `set_apspec`), not on the whole cube.

    The wrapped function is called unchanged after the warning is emitted.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # BUG FIX: the {0} placeholder was never filled in (the message was
        # passed to warn() unformatted), and **kwargs were silently dropped,
        # breaking any decorated method invoked with keyword arguments.
        warn("This operation ({0}) operates on the spectrum selected "
             "from the cube, e.g. with `set_spectrum` or `set_apspec`"
             ", it does not operate on the whole cube.".format(func.__name__),
             PyspeckitWarning)
        return func(*args, **kwargs)
    return wrapper
class Cube(spectrum.Spectrum):
def __init__(self, filename=None, cube=None, xarr=None, xunit=None,
             errorcube=None, header=None, x0=0, y0=0,
             maskmap=None,
             **kwargs):
    """
    A pyspeckit Cube object.  Can be created from a FITS file on disk or
    from an array or a `spectral_cube.SpectralCube` object.  If an array
    is used to insantiate the cube, the `xarr` keyword must be given,
    specifying the X-axis units

    Parameters
    ----------
    filename : str, optional
        The name of a FITS file to open and read from.  Must be 3D
    cube : `np.ndarray`, `spectral_cube.SpectralCube`, or \
        `astropy.units.Quantity`
        The data from which to instantiate a Cube object.  If it is
        an array or an astropy Quantity (which is an array with attached
        units), the X-axis must be specified.  If this is given as a
        SpectralCube object, the X-axis and units should be handled
        automatically.
    xarr : `np.ndarray` or `astropy.units.Quantity`, optional
        The X-axis of the spectra from each cube.  This actually
        corresponds to axis 0, or what we normally refer to as the Z-axis
        of the cube, but it indicates the X-axis in a plot of intensity vs
        wavelength.  The units for this array are specified in the `xunit`
        keyword unless a `~astropy.units.Quantity` is given.
    xunit : str, optional
        The unit of the ``xarr`` array if ``xarr`` is given as a numpy
        array
    errorcube : `np.ndarray`, `spectral_cube.SpectralCube`,\
        or `~astropy.units.Quantity`, optional
        A cube with the same shape as the input cube providing the 1-sigma
        error for each voxel.  This can be specified more efficiently as an
        error map for most use cases, but that approach has not yet been
        implemented.  However, you can pass a 2D error map to `fiteach`.
    header : `fits.Header` or dict, optional
        The header associated with the data.  Only needed if the cube is
        given as an array or a quantity.
    x0, y0 : int
        The initial spectrum to use.  The `Cube` object can be treated as
        a `pyspeckit.Spectrum` object, with all the associated tools
        (plotter, fitter) using the `set_spectrum` method to select a pixel
        from the cube to plot and fit.  However, it is generally more sensible
        to extract individual spectra and treat them separately using the
        `get_spectrum` method, so these keywords MAY BE DEPRECATED in the
        future.
    maskmap : `np.ndarray`, optional
        A boolean mask map, where ``True`` implies that the data are good.
        This will be used for both plotting using `mapplot` and fitting
        using `fiteach`.
    """
    # When reading from a file, load_fits re-enters __init__ with the
    # SpectralCube it reads, so everything below is skipped here.
    if filename is not None:
        self.load_fits(filename)
        return
    else:
        if hasattr(cube, 'spectral_axis'):
            # Load from a SpectralCube instance
            self.cube = cube.hdu.data
            # prefer the raw BUNIT metadata when the cube's unit is unset
            if (cube.unit in ('undefined', units.dimensionless_unscaled)
                    and 'BUNIT' in cube._meta):
                self.unit = cube._meta['BUNIT']
            else:
                self.unit = cube.unit
            log.debug("Self.unit: {0}".format(self.unit))
            if xarr is None:
                # only copy the spectral axis when it does not own its data
                # (avoids aliasing a view of someone else's array)
                if cube.spectral_axis.flags['OWNDATA']:
                    xarr = SpectroscopicAxis(cube.spectral_axis,
                                             unit=cube.spectral_axis.unit,
                                             refX=cube.wcs.wcs.restfrq,
                                             refX_unit='Hz')
                else:
                    xarr = SpectroscopicAxis(cube.spectral_axis.copy(),
                                             unit=cube.spectral_axis.unit,
                                             refX=cube.wcs.wcs.restfrq,
                                             refX_unit='Hz')
            if header is None:
                header = cube.header
        elif hasattr(cube, 'unit'):
            # astropy Quantity: split into raw array + unit
            self.cube = cube.value
            self.unit = cube.unit
        else:
            # plain ndarray; unit is resolved later from the header
            self.cube = cube

        # same three-way dispatch for the error cube
        if hasattr(errorcube, 'spectral_axis'):
            # Load from a SpectralCube instance
            self.errorcube = errorcube.hdu.data
        elif hasattr(errorcube, 'unit'):
            self.errorcube = errorcube.value
        else:
            self.errorcube = errorcube

        if hasattr(xarr, 'flags'):
            log.debug("XARR flags: {0}".format(xarr.flags))
        self.xarr = generate_xarr(xarr, unit=xunit)
        if hasattr(xarr, 'flags'):
            log.debug("self.xarr flags: {0}".format(xarr.flags))
        self.header = header
        self.error = None
        if self.cube is not None:
            # seed self.data with the (y0, x0) spectrum so Spectrum-style
            # tools (plotter, fitter) have something to operate on
            self.data = self.cube[:,int(y0),int(x0)]

    if not hasattr(self, '_unit'):
        self.unit = units.dimensionless_unscaled
    log.debug("Self.unit before header: {0}".format(self.unit))
    if self.header is not None:
        self.parse_header(self.header)
    else:
        log.debug("self.header is None: {0}".format(self.header))
        self.unit = 'undefined'
        self.header = fits.Header()
    log.debug("Self.unit after header: {0}".format(self.unit))

    if maskmap is not None:
        if maskmap.ndim != 2:
            raise ValueError("Mask map must be two-dimensional.")
        self.maskmap = maskmap
    else:
        # default: all spatial pixels are good
        self.maskmap = np.ones(self.cube.shape[1:],dtype='bool')

    if isinstance(filename,str):
        self.fileprefix = filename.rsplit('.', 1)[0]    # Everything prior to .fits or .txt
    else:
        self.fileprefix = "pyfitsHDU"

    # Spectrum-style machinery (plotting, fitting, baselining) bound to this cube
    self.plotter = spectrum.plotters.Plotter(self)
    self._register_fitters()
    self.specfit = spectrum.fitters.Specfit(self,Registry=self.Registry)
    self.baseline = spectrum.baseline.Baseline(self)
    self.speclines = spectrum.speclines
    # Initialize writers
    self.writer = {}
    for writer in spectrum.writers.writers:
        self.writer[writer] = spectrum.writers.writers[writer](self)

    # Special.  This needs to be modified to be more flexible; for now I need it to work for nh3
    self.plot_special = None
    self.plot_special_kwargs = {}
    self._modelcube = None

    if self.header:
        self.wcs = wcs.WCS(self.header)
        self.wcs.wcs.fix()
        # WCS axis indices are 0-based; FITS header keywords are 1-based
        self._spectral_axis_number = self.wcs.wcs.spec+1
        self._first_cel_axis_num = np.where(self.wcs.wcs.axis_types // 1000 == 2)[0][0]+1

        # TODO: Improve this!!!
        self.system = ('galactic'
                       if ('CTYPE{0}'.format(self._first_cel_axis_num)
                           in self.header and 'GLON' in
                           self.header['CTYPE{0}'.format(self._first_cel_axis_num)])
                       else 'celestial')
    else:
        # no header: fall back to plain pixel coordinates
        self._spectral_axis_number = 2
        self._first_cel_axis_num = 0
        self.system = 'PIXEL'

    self.mapplot = mapplot.MapPlotter(self)
def load_fits(self, fitsfile):
    """Read *fitsfile* from disk via spectral_cube and initialize this Cube.

    Raises ImportError with installation instructions when the required
    spectral_cube package is unavailable.
    """
    try:
        from spectral_cube import SpectralCube
    except ImportError:
        raise ImportError("Could not import spectral_cube.  As of pyspeckit"
                          " 0.17, spectral_cube is required for cube reading. "
                          "It can be pip installed or acquired from "
                          "spectral-cube.rtfd.org.")
    else:
        return self.load_spectral_cube(SpectralCube.read(fitsfile))
def load_spectral_cube(self, cube):
    """
    Load the cube from a spectral_cube.SpectralCube object
    """
    # Re-run __init__ on this instance with the SpectralCube; that rebuilds
    # the data, xarr, header, plotter, fitter, etc. from scratch.
    self.__init__(cube=cube)
def __repr__(self):
    """Summarize the cube: spectral range, flux range, unit, shape, and id."""
    return (f'<Cube object over spectral range {self.xarr.min().value:6.5g} :'
            f' {self.xarr.max().value:6.5g} {self.xarr.unit}'
            f' and flux range = [{self.data.min():2.1f}, {self.data.max():2.1f}]'
            f' {self.unit} with shape {self.cube.shape!r}'
            f' at {hex(self.__hash__())}>')
def copy(self, deep=True):
    """
    Create a copy of the spectral cube with its own plotter, fitter, etc.

    Useful for, e.g., comparing smoothed to unsmoothed data.  With
    ``deep=True`` (default) the array-valued attributes, WCS, and header
    are copied as well, so modifying the clone leaves this cube untouched.
    """
    clone = copy.copy(self)
    clone.header = copy.copy(self.header)
    if deep:
        # copy the array-valued attributes so the clone owns its data
        for attr in ('xarr', 'data', 'cube', 'maskmap', 'error', 'errorcube'):
            setattr(clone, attr, copy.copy(getattr(self, attr)))
        if hasattr(self, 'wcs'):
            clone.wcs = self.wcs.deepcopy()
        clone.header = self.header.copy()

    # give the clone its own plotting/fitting machinery, re-parented to it
    clone.plotter = self.plotter.copy(parent=clone)
    clone._register_fitters()
    clone.specfit = self.specfit.copy(parent=clone)
    clone.specfit.Spectrum.plotter = clone.plotter
    clone.baseline = self.baseline.copy(parent=clone)
    clone.baseline.Spectrum.plotter = clone.plotter
    clone.mapplot = self.mapplot.copy(parent=clone)
    clone.mapplot.Cube = clone
    return clone
def _update_header_from_xarr(self):
    """Use the SpectroscopicAxis' _make_header method to update the Cube header.

    Keywords produced for axis 1 (e.g. CRPIX1) are renamed onto this cube's
    spectral axis number, and astropy unit/quantity values are converted to
    plain header-compatible values before the update.
    """
    self.header['NAXIS3'] = self.xarr.size
    self.xarr._make_header()
    spec_axis = self._spectral_axis_number
    newhead = {}
    for key, val in iteritems(self.xarr.wcshead):
        # move axis-1 keywords (CRPIX1, CRVAL1, ...) onto the spectral axis
        if key.endswith('1'):
            key = key.replace('1', str(spec_axis))
        # headers cannot store astropy objects: unwrap Quantity values and
        # serialize Unit instances to strings
        converted = val
        if isinstance(converted, units.Quantity):
            converted = converted.value
        elif isinstance(converted, (units.CompositeUnit, units.Unit)):
            converted = converted.to_string()
        log.debug("Updating header: {}: {}".format(key, val))
        newhead[key] = converted
    self.header.update(newhead)
def slice(self, start=None, stop=None, unit='pixel', preserve_fits=False,
          copy=True, update_header=False):
    """
    Slice a cube along the spectral axis
    (equivalent to "spectral_slab" from the spectral_cube package)

    Parameters
    ----------
    start : numpy.float or int
        start of slice
    stop : numpy.float or int
        stop of slice
    unit : str
        allowed values are any supported physical unit, 'pixel'
    preserve_fits : bool
        if True, copy the fitted model parameters and baseline parameters
        from this cube onto the sliced copy
    copy : bool
        must be True; in-place slicing is not implemented
    update_header : bool
        modifies the header of the spectral cube according to the slice

    Returns
    -------
    Cube
        A new, sliced copy of this cube.
    """
    # convert the requested start/stop to pixel indices on the spectral axis
    x_in_units = self.xarr.as_unit(unit)
    start_ind = x_in_units.x_to_pix(start)
    stop_ind = x_in_units.x_to_pix(stop)
    if start_ind > stop_ind:
        start_ind, stop_ind = stop_ind, start_ind
    spectrum_slice = slice(start_ind, stop_ind)

    if not copy:
        raise NotImplementedError("Must copy when slicing a cube.")
    newcube = self.copy()
    newcube.cube = newcube.cube[spectrum_slice, :, :]
    # BUG FIX: the attribute set in __init__ is `errorcube`, not `errcube`;
    # the old `hasattr(newcube, 'errcube')` test was always False, so the
    # error cube was silently never sliced.  Guard on None because the
    # attribute always exists but may be unset.
    if getattr(newcube, 'errorcube', None) is not None:
        newcube.errorcube = newcube.errorcube[spectrum_slice, :, :]
    newcube.data = newcube.data[spectrum_slice]
    if newcube.error is not None:
        newcube.error = newcube.error[spectrum_slice]
    newcube.xarr = newcube.xarr[spectrum_slice]

    # create new specfit / baseline instances (otherwise they'll be the wrong length)
    newcube._register_fitters()
    newcube.baseline = spectrum.baseline.Baseline(newcube)
    newcube.specfit = spectrum.fitters.Specfit(newcube, Registry=newcube.Registry)

    if preserve_fits:
        newcube.specfit.modelpars = self.specfit.modelpars
        newcube.specfit.parinfo = self.specfit.parinfo
        newcube.baseline.baselinepars = self.baseline.baselinepars
        newcube.baseline.order = self.baseline.order

    # modify the header in the new cube
    if update_header:
        newcube._update_header_from_xarr()
        # create a new wcs instance from the updated header
        newcube.wcs = wcs.WCS(newcube.header)
        newcube.wcs.wcs.fix()
        newcube._spectral_axis_number = newcube.wcs.wcs.spec + 1
        newcube._first_cel_axis_num = np.where(newcube.wcs.wcs.axis_types
                                               // 1000 == 2)[0][0] + 1

    return newcube
def __getitem__(self, indx):
"""
If [] is used on a cube, slice on the cube and use
the first dimension to slice on | |
import cdflib
import numpy as np
from pytplot import clip, options, store_data, ylim, zlim, get_data
from ..load import load
def hep(trange=['2017-03-27', '2017-03-28'],
datatype='omniflux',
level='l2',
suffix='',
get_support_data=False,
varformat=None,
varnames=[],
downloadonly=False,
notplot=False,
no_update=False,
uname=None,
passwd=None,
time_clip=False,
ror=True,
version=None):
"""
This function loads data from the HEP experiment from the Arase mission
Parameters:
trange : list of str
time range of interest [starttime, endtime] with the format
'YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
datatype: str
Data type; Valid options:
level: str
Data level; Valid options:
suffix: str
The tplot variable names will be given this suffix. By default,
no suffix is added.
get_support_data: bool
Data with an attribute "VAR_TYPE" with a value of "support_data"
will be loaded into tplot. By default, only loads in data with a
"VAR_TYPE" attribute of "data".
varformat: str
The file variable formats to load into tplot. Wildcard character
"*" is accepted. By default, all variables are loaded in.
varnames: list of str
List of variable names to load (if not specified,
all data variables are loaded)
downloadonly: bool
Set this flag to download the CDF files, but not load them into
tplot variables
notplot: bool
Return the data in hash tables instead of creating tplot variables
no_update: bool
If set, only load data from your local cache
time_clip: bool
Time clip the variables to exactly the range specified in the trange keyword
ror: bool
If set, print PI info and rules of the road
version: str
Set this value to specify the version of cdf files (such as "v01_02", "v01_03", ...)
Returns:
List of tplot variables created.
"""
file_res = 3600. * 24
prefix = 'erg_hep_'+level+'_'
if level == 'l2':
pathformat = 'satellite/erg/hep/'+level+'/'+datatype + \
'/%Y/%m/erg_hep_'+level+'_'+datatype + '_%Y%m%d_'
if version is None:
pathformat += 'v??_??.cdf'
else:
pathformat += version + '.cdf'
if level == 'l3':
pathformat = 'satellite/erg/hep/'+level + \
'/pa/%Y/%m/erg_hep_'+level+'_pa_%Y%m%d_'
if version is None:
pathformat += 'v??_??.cdf'
else:
pathformat += version + '.cdf'
initial_notplot_flag = False
if notplot:
initial_notplot_flag = True
if ((level == 'l2') and (datatype == 'omniflux')) or (datatype == '3dflux') or (level == 'l3'):
# to avoid failure of creation plot variables (at store_data.py) of hep
notplot = True
loaded_data = load(pathformat=pathformat, trange=trange, level=level, datatype=datatype, file_res=file_res, prefix=prefix, suffix=suffix, get_support_data=get_support_data,
varformat=varformat, varnames=varnames, downloadonly=downloadonly, notplot=notplot, time_clip=time_clip, no_update=no_update, uname=uname, passwd=passwd, version=version)
if (len(loaded_data) > 0) and ror:
try:
if isinstance(loaded_data, list):
if downloadonly:
cdf_file = cdflib.CDF(loaded_data[-1])
gatt = cdf_file.globalattsget()
else:
gatt = get_data(loaded_data[-1], metadata=True)['CDF']['GATT']
elif isinstance(loaded_data, dict):
gatt = loaded_data[list(loaded_data.keys())[-1]]['CDF']['GATT']
# --- print PI info and rules of the road
print(' ')
print(
'**************************************************************************')
print(gatt["LOGICAL_SOURCE_DESCRIPTION"])
print('')
print('PI: ', gatt['PI_NAME'])
print("Affiliation: "+gatt["PI_AFFILIATION"])
print('')
print('- The rules of the road (RoR) common to the ERG project:')
print(
' https://ergsc.isee.nagoya-u.ac.jp/data_info/rules_of_the_road.shtml.en')
print(
'- RoR for HEP data: https://ergsc.isee.nagoya-u.ac.jp/mw/index.php/ErgSat/Hep')
if level == 'l3':
print(
'- RoR for MGF data: https://ergsc.isee.nagoya-u.ac.jp/mw/index.php/ErgSat/Mgf')
print('')
print('Contact: erg_hep_info at isee.nagoya-u.ac.jp')
print(
'**************************************************************************')
except:
print('printing PI info and rules of the road was failed')
if initial_notplot_flag or downloadonly:
return loaded_data
if isinstance(loaded_data, dict):
if (level == 'l2') and (datatype == 'omniflux'):
tplot_variables = []
if prefix + 'FEDO_L' + suffix in loaded_data:
v_vars_min = loaded_data[prefix + 'FEDO_L' + suffix]['v'][0]
v_vars_max = loaded_data[prefix + 'FEDO_L' + suffix]['v'][1]
# log average of energy bins
v_vars = np.power(
10., (np.log10(v_vars_min) + np.log10(v_vars_max)) / 2.)
store_data(prefix + 'FEDO_L' + suffix, data={'x': loaded_data[prefix + 'FEDO_L' + suffix]['x'],
'y': loaded_data[prefix + 'FEDO_L' + suffix]['y'],
'v': v_vars},
attr_dict={'CDF':loaded_data[prefix + 'FEDO_L' + suffix]['CDF']})
tplot_variables.append(prefix + 'FEDO_L' + suffix)
if prefix + 'FEDO_H' + suffix in loaded_data:
v_vars_min = loaded_data[prefix + 'FEDO_H' + suffix]['v'][0]
v_vars_max = loaded_data[prefix + 'FEDO_H' + suffix]['v'][1]
# log average of energy bins
v_vars = np.power(
10., (np.log10(v_vars_min) + np.log10(v_vars_max)) / 2.)
store_data(prefix + 'FEDO_H' + suffix, data={'x': loaded_data[prefix + 'FEDO_H' + suffix]['x'],
'y': loaded_data[prefix + 'FEDO_H' + suffix]['y'],
'v': v_vars},
attr_dict={'CDF':loaded_data[prefix + 'FEDO_H' + suffix]['CDF']})
tplot_variables.append(prefix + 'FEDO_H' + suffix)
# remove minus valuse of y array
if prefix + 'FEDO_L' + suffix in tplot_variables:
clip(prefix + 'FEDO_L' + suffix, 0., 1.0e+10)
if prefix + 'FEDO_H' + suffix in tplot_variables:
clip(prefix + 'FEDO_H' + suffix, 0., 1.0e+10)
# set spectrogram plot option
options(prefix + 'FEDO_L' + suffix, 'Spec', 1)
options(prefix + 'FEDO_H' + suffix, 'Spec', 1)
# set y axis to logscale
options(prefix + 'FEDO_L' + suffix, 'ylog', 1)
options(prefix + 'FEDO_H' + suffix, 'ylog', 1)
# set yrange
options(prefix + 'FEDO_L' + suffix, 'yrange', [3.0e+01, 2.0e+03])
options(prefix + 'FEDO_H' + suffix, 'yrange', [7.0e+01, 2.0e+03])
# set ytitle
options(prefix + 'FEDO_L' + suffix, 'ytitle',
'HEP-L\nomniflux\nLv2\nEnergy')
options(prefix + 'FEDO_H' + suffix, 'ytitle',
'HEP-H\nomniflux\nLv2\nEnergy')
# set ysubtitle
options(prefix + 'FEDO_L' + suffix, 'ysubtitle', '[keV]')
options(prefix + 'FEDO_H' + suffix, 'ysubtitle', '[keV]')
# set ylim
if prefix + 'FEDO_L' + suffix in tplot_variables:
ylim(prefix + 'FEDO_L' + suffix, 30, 1800)
if prefix + 'FEDO_H' + suffix in tplot_variables:
ylim(prefix + 'FEDO_H' + suffix, 500, 2048)
# set z axis to logscale
options(prefix + 'FEDO_L' + suffix, 'zlog', 1)
options(prefix + 'FEDO_H' + suffix, 'zlog', 1)
# set zrange
options(prefix + 'FEDO_L' + suffix, 'zrange', [1.0e-15, 1.0e+06])
options(prefix + 'FEDO_H' + suffix, 'zrange', [1.0e-10, 1.0e+5])
# set ztitle
options(prefix + 'FEDO_L' + suffix,
'ztitle', '[/cm^{2}-str-s-keV]')
options(prefix + 'FEDO_H' + suffix,
'ztitle', '[/cm^{2}-str-s-keV]')
# set zlim
if prefix + 'FEDO_L' + suffix in tplot_variables:
zlim(prefix + 'FEDO_L' + suffix, 1e+0, 1e+5)
if prefix + 'FEDO_H' + suffix in tplot_variables:
zlim(prefix + 'FEDO_H' + suffix, 1e+0, 1e+5)
# change colormap option
options(prefix + 'FEDO_L' + suffix, 'Colormap', 'jet')
options(prefix + 'FEDO_H' + suffix, 'Colormap', 'jet')
return tplot_variables
if (level == 'l2') and (datatype == '3dflux'):
tplot_variables = []
v2_array = [i for i in range(15)]
if prefix + 'FEDU_L' + suffix in loaded_data:
store_data(prefix + 'FEDU_L' + suffix, data={'x': loaded_data[prefix + 'FEDU_L' + suffix]['x'],
'y': loaded_data[prefix + 'FEDU_L' + suffix]['y'],
'v1': np.sqrt(loaded_data[prefix + 'FEDU_L' + suffix]['v'][0, :] *
loaded_data[prefix + 'FEDU_L' + suffix]['v'][1, :]), # geometric mean for 'v1'
'v2': v2_array},
attr_dict={'CDF':loaded_data[prefix + 'FEDU_L' + suffix]['CDF']})
tplot_variables.append(prefix + 'FEDU_L' + suffix)
clip(prefix + 'FEDU_L' + suffix, -1.0e+10, 1.0e+10)
if prefix + 'FEDU_H' + suffix in loaded_data:
store_data(prefix + 'FEDU_H' + suffix, data={'x': loaded_data[prefix + 'FEDU_H' + suffix]['x'],
'y': loaded_data[prefix + 'FEDU_H' + suffix]['y'],
'v1': np.sqrt(loaded_data[prefix + 'FEDU_H' + suffix]['v'][0, :] *
loaded_data[prefix + 'FEDU_H' + suffix]['v'][1, :]), # geometric mean for 'v1'
'v2': v2_array},
attr_dict={'CDF':loaded_data[prefix + 'FEDU_H' + suffix]['CDF']})
tplot_variables.append(prefix + 'FEDU_H' + suffix)
clip(prefix + 'FEDU_H' + suffix, -1.0e+10, 1.0e+10)
return tplot_variables
if level == 'l3': # implementation for level = 'l3'
tplot_variables = []
if prefix + 'FEDU_L' + suffix in loaded_data:
L_energy_array_ave = np.sqrt(loaded_data[prefix + 'FEDU_L' + suffix]['v1'][0, :] *
loaded_data[prefix + 'FEDU_L' + suffix]['v1'][1, :]) # geometric mean for 'v1'
# get energy [keV] array for ytitle options
L_energy_array = np.trunc(L_energy_array_ave).astype(int)
non_negative_y_array = np.where(
loaded_data[prefix + 'FEDU_L' + suffix]['y'] < 0., np.nan, loaded_data[prefix + 'FEDU_L' + suffix]['y'])
store_data(prefix + 'FEDU_L' + suffix, data={'x': loaded_data[prefix + 'FEDU_L' + suffix]['x'],
'y': non_negative_y_array,
'v1': L_energy_array_ave,
'v2': loaded_data[prefix + 'FEDU_L' + suffix]['v2']},
attr_dict={'CDF':loaded_data[prefix + 'FEDU_L' + suffix]['CDF']})
options(prefix + 'FEDU_L' + suffix, 'spec', 1)
# set ylim
ylim(prefix + 'FEDU_L' + suffix, 0, 180)
# set zlim
zlim(prefix + 'FEDU_L' + suffix, 1e+2, 1e+6)
tplot_variables.append(prefix + 'FEDU_L' + suffix)
# make Tplot Variables of erg_hep_l3_FEDU_L_paspec_ene?? (??: 00, 01, 02, ..., 15)
for i in range(loaded_data[prefix + 'FEDU_L' + suffix]['y'].shape[1]):
tplot_name = prefix + 'FEDU_L_paspec_ene' + \
str(i).zfill(2) + suffix
store_data(tplot_name, data={'x': loaded_data[prefix + 'FEDU_L' + suffix]['x'],
'y': non_negative_y_array[:, i, :],
'v': loaded_data[prefix + 'FEDU_L' + suffix]['v2']},
attr_dict={'CDF':loaded_data[prefix + 'FEDU_L' + suffix]['CDF']})
options(tplot_name, 'spec', 1)
# set ylim
ylim(tplot_name, 0, 180)
# set zlim
zlim(tplot_name, 1e+2, 1e+6)
# set ytitle
options(
tplot_name, 'ytitle', f'HEP-L\nEne{str(i).zfill(2)}\n{L_energy_array[i]} keV')
tplot_variables.append(tplot_name)
if prefix + 'FEDU_H' + suffix in loaded_data:
H_energy_array_ave = np.sqrt(loaded_data[prefix + 'FEDU_H' + suffix]['v1'][0, :] *
loaded_data[prefix + 'FEDU_H' + suffix]['v1'][1, :]) # geometric mean for 'v1'
# get energy | |
unique k-mers' in data, data
assert 'false positive rate estimated to be 0.002' in data
def test_oxli_build_graph_write_fp():
    """oxli build-graph must write the graph plus an .info file with stats."""
    infile = utils.get_test_data('random-20-a.fa')
    outfile = utils.get_temp_filename('out')
    # use small HT so the false-positive rate is nonzero and reported
    args = ['build-graph', '-x', '1e5', '-N', '2', '-k', '20', outfile, infile]
    status, out, err = utils.runscript('oxli', args)

    ht_file = outfile + '.pt'
    assert os.path.exists(ht_file), ht_file
    info_file = outfile + '.info'
    assert os.path.exists(info_file), info_file

    data = {line.strip() for line in open(info_file)}
    assert '3959 unique k-mers' in data
    assert 'false positive rate estimated to be 0.002' in data
def test_load_graph_multithread():
    """load-graph.py should run cleanly with multiple threads (-T)."""
    outfile = utils.get_temp_filename('test')
    infile = utils.get_test_data('test-reads.fa')
    status, out, err = utils.runscript(
        'load-graph.py', ['-N', '4', '-x', '1e7', '-T', '8', outfile, infile])
def test_oxli_build_graph_multithread():
    """oxli build-graph should run cleanly with multiple threads (-T)."""
    outfile = utils.get_temp_filename('test')
    infile = utils.get_test_data('test-reads.fa')
    status, out, err = utils.runscript(
        'oxli',
        ['build-graph', '-N', '4', '-x', '1e7', '-T', '8', outfile, infile])
def test_load_graph_max_memory_usage_parameter():
    """-M caps total memory; resulting hash table sizes must respect the cap."""
    outfile = utils.get_temp_filename('out')
    infile = utils.get_test_data('random-20-a.fa')
    args = ['-M', '2e7', '-k', '20', '-n', outfile, infile]
    status, out, err = utils.runscript('load-graph.py', args)

    assert 'Total number of unique k-mers: 3960' in err, err

    ht_file = outfile + '.pt'
    assert os.path.exists(ht_file), ht_file

    # the table must be loadable, and its combined size must fit under -M
    try:
        ht = khmer.load_hashbits(ht_file)
    except IOError as err:
        assert 0, str(err)
    assert (sum(ht.hashsizes()) / 8.) < 2e7, ht.hashsizes()
def _make_graph(infilename, min_hashsize=1e7, n_hashes=2, ksize=20,
                do_partition=False,
                annotate_partitions=False,
                stop_big_traverse=False):
    """Run load-graph.py (plus optional partitioning/annotation) on a file.

    ``infilename`` is used as-is (callers pass a full path). Returns the
    output prefix shared by all generated artifacts.
    """
    outfile = utils.get_temp_filename('out')
    load_args = ['-x', str(min_hashsize), '-N', str(n_hashes),
                 '-k', str(ksize), outfile, infilename]
    utils.runscript('load-graph.py', load_args)

    ht_file = outfile + '.pt'
    assert os.path.exists(ht_file), ht_file
    tagset_file = outfile + '.tagset'
    assert os.path.exists(tagset_file), tagset_file

    if do_partition:
        part_args = [outfile]
        if stop_big_traverse:
            part_args.insert(0, '--no-big-traverse')
        utils.runscript('partition-graph.py', part_args)

        utils.runscript('merge-partitions.py', [outfile, '-k', str(ksize)])
        assert os.path.exists(outfile + '.pmap.merged')

        if annotate_partitions:
            in_dir = os.path.dirname(outfile)
            utils.runscript('annotate-partitions.py',
                            ["-k", str(ksize), outfile, infilename], in_dir)

            baseinfile = os.path.basename(infilename)
            assert os.path.exists(os.path.join(in_dir, baseinfile + '.part'))

    return outfile
def _DEBUG_make_graph(infilename, min_hashsize=1e7, n_hashes=2, ksize=20,
                      do_partition=False,
                      annotate_partitions=False,
                      stop_big_traverse=False):
    """Verbose variant of _make_graph, printing progress markers.

    Differences from _make_graph: ``infilename`` is resolved through
    utils.get_test_data() here (a bare test-data name, not a path), and the
    table file is checked as '.ct' rather than '.pt'.
    NOTE(review): the '.ct' suffix disagrees with _make_graph's '.pt' —
    confirm this is intentional for the debug path.
    """
    script = 'load-graph.py'
    args = ['-x', str(min_hashsize), '-N', str(n_hashes), '-k', str(ksize)]
    outfile = utils.get_temp_filename('out')
    infile = utils.get_test_data(infilename)
    args.extend([outfile, infile])
    utils.runscript(script, args)
    ht_file = outfile + '.ct'
    assert os.path.exists(ht_file), ht_file
    tagset_file = outfile + '.tagset'
    assert os.path.exists(tagset_file), tagset_file
    if do_partition:
        print(">>>> DEBUG: Partitioning <<<")
        script = 'partition-graph.py'
        args = [outfile]
        if stop_big_traverse:
            # flag must precede the positional graph argument
            args.insert(0, '--no-big-traverse')
        utils.runscript(script, args)
        print(">>>> DEBUG: Merging Partitions <<<")
        script = 'merge-partitions.py'
        args = [outfile, '-k', str(ksize)]
        utils.runscript(script, args)
        final_pmap_file = outfile + '.pmap.merged'
        assert os.path.exists(final_pmap_file)
        if annotate_partitions:
            print(">>>> DEBUG: Annotating Partitions <<<")
            script = 'annotate-partitions.py'
            args = ["-k", str(ksize), outfile, infilename]
            in_dir = os.path.dirname(outfile)
            utils.runscript(script, args, in_dir)
            baseinfile = os.path.basename(infilename)
            assert os.path.exists(os.path.join(in_dir, baseinfile + '.part'))
    return outfile
def test_partition_graph_1():
    """Partition a single-component graph; expect exactly one partition."""
    graphbase = _make_graph(utils.get_test_data('random-20-a.fa'))

    utils.runscript('partition-graph.py', [graphbase])
    utils.runscript('merge-partitions.py', [graphbase, '-k', str(20)])

    pmap = graphbase + '.pmap.merged'
    assert os.path.exists(pmap)

    ht = khmer.load_hashbits(graphbase + '.pt')
    ht.load_tagset(graphbase + '.tagset')
    ht.load_partitionmap(pmap)

    counts = ht.count_partitions()
    assert counts == (1, 0), counts  # should be exactly one partition.
def test_partition_graph_nojoin_k21():
    """At K=21 the reads do not join, yielding 99 partitions."""
    graphbase = _make_graph(utils.get_test_data('random-20-a.fa'), ksize=21)

    utils.runscript('partition-graph.py', [graphbase])
    utils.runscript('merge-partitions.py', [graphbase, '-k', str(21)])

    pmap = graphbase + '.pmap.merged'
    assert os.path.exists(pmap)

    ht = khmer.load_hashbits(graphbase + '.pt')
    ht.load_tagset(graphbase + '.tagset')
    ht.load_partitionmap(pmap)

    counts = ht.count_partitions()
    assert counts == (99, 0), counts  # should be 99 partitions at K=21
def test_partition_graph_nojoin_stoptags():
    """A stop tag splits the otherwise single component into two partitions."""
    graphbase = _make_graph(utils.get_test_data('random-20-a.fa'))

    # inject a stop tag and persist it
    ht = khmer.load_hashbits(graphbase + '.pt')
    ht.add_stop_tag('TTGCATACGTTGAGCCAGCG')
    stoptags_file = graphbase + '.stoptags'
    ht.save_stop_tags(stoptags_file)
    del ht

    # partition with the stoptags option, then merge
    utils.runscript('partition-graph.py',
                    ['--stoptags', stoptags_file, graphbase])
    utils.runscript('merge-partitions.py', [graphbase, '-k', str(20)])

    pmap = graphbase + '.pmap.merged'
    assert os.path.exists(pmap)

    ht = khmer.load_hashbits(graphbase + '.pt')
    ht.load_tagset(graphbase + '.tagset')
    ht.load_partitionmap(pmap)

    counts = ht.count_partitions()
    assert counts == (2, 0), counts  # should be 2 partitions
def test_partition_graph_big_traverse():
    """Exhaustive traversal joins the lump into a single partition."""
    graphbase = _make_graph(utils.get_test_data('biglump-random-20-a.fa'),
                            do_partition=True, stop_big_traverse=False)

    pmap = graphbase + '.pmap.merged'
    assert os.path.exists(pmap)

    ht = khmer.load_hashbits(graphbase + '.pt')
    ht.load_tagset(graphbase + '.tagset')
    ht.load_partitionmap(pmap)

    counts = ht.count_partitions()
    assert counts == (1, 0), counts  # should be exactly one partition.
def test_partition_graph_no_big_traverse():
    """Without exhaustive traversal the graph stays broken at the knot."""
    graphbase = _make_graph(utils.get_test_data('biglump-random-20-a.fa'),
                            do_partition=True, stop_big_traverse=True)

    pmap = graphbase + '.pmap.merged'
    assert os.path.exists(pmap)

    ht = khmer.load_hashbits(graphbase + '.pt')
    ht.load_tagset(graphbase + '.tagset')
    ht.load_partitionmap(pmap)

    counts = ht.count_partitions()
    assert counts[0] == 4, counts  # should be four partitions, broken at knot.
def test_partition_find_knots_execute():
    """find-knots.py should run after partitioning and emit a stoptags file."""
    graphbase = _make_graph(utils.get_test_data('random-20-a.fa'))

    utils.runscript('partition-graph.py', [graphbase])
    utils.runscript('find-knots.py', [graphbase])

    assert os.path.exists(graphbase + '.stoptags')
def test_annotate_partitions():
    """Annotate reads with their partition id; one joined partition expected."""
    seqfile = utils.get_test_data('random-20-a.fa')
    graphbase = _make_graph(seqfile, do_partition=True)
    in_dir = os.path.dirname(graphbase)

    # the merged pmap must exist before annotation
    assert os.path.exists(graphbase + '.pmap.merged')

    utils.runscript('annotate-partitions.py',
                    ["-k", "20", graphbase, seqfile], in_dir)

    partfile = os.path.join(in_dir, 'random-20-a.fa.part')
    parts = {r.name.split('\t')[1] for r in screed.open(partfile)}
    assert '2' in parts
    assert len(parts) == 1
def test_annotate_partitions_2():
    """At K=21 sequences do not join, so annotation yields 99 partition ids."""
    seqfile = utils.get_test_data('random-20-a.fa')
    graphbase = _make_graph(seqfile, do_partition=True,
                            ksize=21)
    in_dir = os.path.dirname(graphbase)

    # the merged pmap must exist before annotation
    assert os.path.exists(graphbase + '.pmap.merged')

    utils.runscript('annotate-partitions.py',
                    ["-k", "21", graphbase, seqfile], in_dir)

    partfile = os.path.join(in_dir, 'random-20-a.fa.part')
    parts = {r.name.split('\t')[1] for r in screed.open(partfile)}
    print(parts)
    assert len(parts) == 99, len(parts)
def test_extract_partitions():
    """Partition + annotate a single-component file, then extract groups."""
    seqfile = utils.get_test_data('random-20-a.fa')
    graphbase = _make_graph(
        seqfile, do_partition=True, annotate_partitions=True)
    in_dir = os.path.dirname(graphbase)

    # get the final part file
    partfile = os.path.join(in_dir, 'random-20-a.fa.part')

    # ok, now run extract-partitions.
    script = 'extract-partitions.py'
    args = ['extracted', partfile]
    utils.runscript(script, args, in_dir)

    distfile = os.path.join(in_dir, 'extracted.dist')
    groupfile = os.path.join(in_dir, 'extracted.group0000.fa')
    assert os.path.exists(distfile)
    assert os.path.exists(groupfile)

    # close the dist file deterministically instead of leaking the handle
    with open(distfile) as fp:
        dist = fp.readline()
    assert dist.strip() == '99 1 1 99'

    parts = [r.name.split('\t')[1] for r in screed.open(partfile)]
    assert len(parts) == 99, len(parts)
    parts = set(parts)
    assert len(parts) == 1, len(parts)
def test_extract_partitions_header_whitespace():
    """Extraction must handle whitespace in sequence headers."""
    seqfile = utils.get_test_data('test-overlap2.fa')
    graphbase = _make_graph(
        seqfile, do_partition=True, annotate_partitions=True)
    in_dir = os.path.dirname(graphbase)

    # get the final part file
    partfile = os.path.join(in_dir, 'test-overlap2.fa.part')

    # ok, now run extract-partitions.
    script = 'extract-partitions.py'
    args = ['extracted', partfile]
    utils.runscript(script, args, in_dir)

    distfile = os.path.join(in_dir, 'extracted.dist')
    groupfile = os.path.join(in_dir, 'extracted.group0000.fa')
    assert os.path.exists(distfile)
    assert os.path.exists(groupfile)

    # close the dist file deterministically instead of leaking the handle
    with open(distfile) as fp:
        dist = fp.readline()
    assert dist.strip() == '1 11960 11960 11960', dist.strip()

    # parse_description=False keeps the whole header intact
    parts = [r.name.split('\t')[1]
             for r in screed.open(partfile, parse_description=False)]
    assert len(parts) == 13538, len(parts)
    parts = set(parts)
    assert len(parts) == 12602, len(parts)
def test_extract_partitions_fq():
    """FASTQ extraction must keep names, annotations and quality strings."""
    seqfile = utils.get_test_data('random-20-a.fq')
    graphbase = _make_graph(
        seqfile, do_partition=True, annotate_partitions=True)
    in_dir = os.path.dirname(graphbase)

    # get the final part file
    partfile = os.path.join(in_dir, 'random-20-a.fq.part')

    # ok, now run extract-partitions.
    script = 'extract-partitions.py'
    args = ['extracted', partfile]
    utils.runscript(script, args, in_dir)

    distfile = os.path.join(in_dir, 'extracted.dist')
    groupfile = os.path.join(in_dir, 'extracted.group0000.fq')
    assert os.path.exists(distfile)
    assert os.path.exists(groupfile)

    # close the dist file deterministically instead of leaking the handle
    with open(distfile) as fp:
        dist = fp.readline()
    assert dist.strip() == '99 1 1 99'

    screed_iter = screed.open(partfile, parse_description=False)
    names = [r.name.split('\t')[0] for r in screed_iter]
    assert '35 1::FOO' in names
    assert '46 1::FIZ' in names

    screed_iter = screed.open(partfile, parse_description=False)
    parts = [r.name.split('\t')[1] for r in screed_iter]
    assert len(parts) == 99, len(parts)
    parts = set(parts)
    assert len(parts) == 1, len(parts)

    # quality strings must survive the round-trip
    quals = list({r.quality for r in screed.open(partfile)})
    assert quals[0], quals
def test_extract_partitions_output_unassigned():
    """-U must additionally produce an .unassigned.fa output file."""
    seqfile = utils.get_test_data('random-20-a.fa')
    graphbase = _make_graph(
        seqfile, do_partition=True, annotate_partitions=True)
    in_dir = os.path.dirname(graphbase)

    # get the final part file
    partfile = os.path.join(in_dir, 'random-20-a.fa.part')

    # ok, now run extract-partitions.
    script = 'extract-partitions.py'
    args = ['-U', 'extracted', partfile]
    utils.runscript(script, args, in_dir)

    distfile = os.path.join(in_dir, 'extracted.dist')
    groupfile = os.path.join(in_dir, 'extracted.group0000.fa')
    unassigned_file = os.path.join(in_dir, 'extracted.unassigned.fa')
    assert os.path.exists(distfile)
    assert os.path.exists(groupfile)
    assert os.path.exists(unassigned_file)

    # close the dist file deterministically instead of leaking the handle
    with open(distfile) as fp:
        dist = fp.readline()
    assert dist.strip() == '99 1 1 99'

    parts = [r.name.split('\t')[1] for r in screed.open(partfile)]
    assert len(parts) == 99, len(parts)
    parts = set(parts)
    assert len(parts) == 1, len(parts)
def test_extract_partitions_no_output_groups():
    """-n warns that group files are suppressed (script may sys.exit)."""
    seqfile = utils.get_test_data('random-20-a.fq')
    graphbase = _make_graph(
        seqfile, do_partition=True, annotate_partitions=True)
    in_dir = os.path.dirname(graphbase)

    partfile = os.path.join(in_dir, 'random-20-a.fq.part')

    # run extract-partitions with group output disabled; tolerate the
    # expected sys.exit via fail_ok
    _, out, err = utils.runscript('extract-partitions.py',
                                  ['-n', 'extracted', partfile],
                                  in_dir, fail_ok=True)
    assert "NOT outputting groups! Beware!" in err
# Group files | |
<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. module:: redgem
:platform: Unix, Windows
:synopsis: RedGEM Algorithm
.. moduleauthor:: pyTFA team
Model class
"""
import networkx as nx
from cobra import Metabolite, Reaction, Model
from copy import deepcopy
class NetworkExpansion:
def __init__(self, gem, core_subsystems, extracellular_system,
             cofactors, small_metabolites, inorganics,
             d, n):
    """
    A class encapsulating the RedGEM algorithm

    :param gem: The studied GEM
    :param core_subsystems: Core subsystems
    :param extracellular_system: Extracellular metabolite ids
    :param cofactors: List of cofactors id
    :param small_metabolites: List of small metabolites id
    :param inorganics: List of inorganics id
    :param d: Degree
    :param n: User parameter
    """
    # Shallow copy of the GEM: a deepcopy is possibly performed by the
    # caller (redgem) before NetworkExpansion is instantiated.
    self._redgem = gem
    self._graph = nx.DiGraph()

    # Subsystem bookkeeping
    self._core_subsystems = core_subsystems
    self._subsystem_count = len(core_subsystems)
    self._extracellular_system = extracellular_system

    # Factories producing fresh, independent containers per (pair of)
    # subsystem(s): one set per path length 0..d.
    def _lists_of_sets():
        return [set() for _ in range(d + 1)]

    def _per_subsystem(factory):
        return {name: factory() for name in core_subsystems}

    self._subsystem_reactions = {}
    self._subsystem_reactions_id = {}
    self._intermediate_reactions_id = _per_subsystem(
        lambda: _per_subsystem(_lists_of_sets))
    self._subsystem_metabolites = {}
    self._subsystem_metabolites_id = {}
    self._intermediate_metabolites_id = _per_subsystem(
        lambda: _per_subsystem(_lists_of_sets))
    self._intermediate_paths = _per_subsystem(
        lambda: _per_subsystem(_lists_of_sets))
    # -1 marks "distance not yet computed / no path found"
    self._min_distance_sub_to_sub = _per_subsystem(
        lambda: {name: -1 for name in core_subsystems})
    self._intermediate_extracellular_paths = _per_subsystem(_lists_of_sets)
    self._intermediate_extracellular_metabolites_id = _per_subsystem(
        _lists_of_sets)
    self._intermediate_extracellular_reactions_id = _per_subsystem(
        _lists_of_sets)
    self._path_dict = {}

    # Remaining parameters
    self._cofactor_pairs = cofactors
    self._small_metabolites = small_metabolites
    self._inorganics = inorganics
    self._d = d
    self._n = n
def extract_subsystem_reactions(self, subsystem):
"""
Extracts all reactions of a subsystem and stores them and their id in the corresponding
dictionary.
:param subsystem: Name of the subsystem
:return: Extracted reactions
"""
rxns = set()
rxns_id = set()
for rxn in self._redgem.reactions:
if rxn.subsystem == subsystem:
rxns.add(rxn)
rxns_id.add(rxn.id)
self._subsystem_reactions[subsystem] = rxns
self._subsystem_reactions_id[subsystem] = rxns_id
return rxns
def extract_subsystem_metabolites(self, subsystem):
"""
Extracts all metabolites of a subsystem and stores them and their id in the corresponding
dictionary.
:param subsystem: Name of the subsystem
:return: Extracted metabolites
"""
subsystem_rxns = self._subsystem_reactions[subsystem]
metabolites = set()
metabolites_id = set()
for rxn in subsystem_rxns:
for metabolite in rxn.metabolites:
metabolite_id = metabolite.id
if metabolite_id in self._cofactor_pairs \
or metabolite_id in self._small_metabolites \
or metabolite_id in self._inorganics:
continue
metabolites.add(metabolite)
metabolites_id.add(metabolite.id)
self._subsystem_metabolites[subsystem] = metabolites
self._subsystem_metabolites_id[subsystem] = metabolites_id
return metabolites
def create_new_stoichiometric_matrix(self):
    """
    Extracts the new graph without the small metabolites, inorganics and
    cofactor pairs.

    Builds fresh cobra Metabolite/Reaction copies of everything kept, then
    populates self._graph with one node per kept metabolite and one edge
    per (reactant, product) pair of every kept reaction.

    :return: Networkx graph of the new network
    """
    kept_rxns = []
    kept_metabolites = set()

    for rxn in self._redgem.reactions:
        metabolites = {}
        for metabolite, coefficient in rxn.metabolites.items():
            metabolite_id = metabolite.id
            # filtered species are dropped from the rebuilt reaction
            if metabolite_id in self._cofactor_pairs \
                    or metabolite_id in self._small_metabolites \
                    or metabolite_id in self._inorganics:
                continue
            new_metabolite = Metabolite(metabolite_id,
                                        formula=metabolite.formula,
                                        name=metabolite.name,
                                        compartment=metabolite.compartment)
            metabolites[new_metabolite] = coefficient
            # NOTE: the *original* metabolite object is recorded here,
            # while the rebuilt reaction holds the copy.
            kept_metabolites.add(metabolite)
        new_rxn = Reaction(rxn.id,
                           name=rxn.name,
                           subsystem=rxn.subsystem,
                           lower_bound=rxn.lower_bound,
                           upper_bound=rxn.upper_bound)
        new_rxn.add_metabolites(metabolites)
        kept_rxns.append(new_rxn)

    paths_struct = [{} for _ in range(self._d+1)]  # Comprehension list to create multiple dicts
    to_struct = [""] * (self._d+1)
    # NOTE(review): every node receives the *same* paths_struct/to_struct
    # objects, so a mutation through one node's attributes is visible on
    # all nodes — confirm downstream code overwrites rather than mutates.
    for metabolite in kept_metabolites:
        self._graph.add_node(metabolite.id, paths=paths_struct, to=to_struct)
    for rxn in kept_rxns:
        for reactant in rxn.reactants:
            for product in rxn.products:
                # edge direction follows reaction direction; last writer
                # wins for rxn_id if several reactions share an edge
                self._graph.add_edge(reactant.id, product.id, rxn_id=rxn.id, weight=1)
    return self._graph
def breadth_search_subsystems_paths_length_d(self, subsystem_i, subsystem_j, d):
    """
    Breadth first search from each metabolite in subsystem i with special
    stop conditions during exploration for paths of length d.

    This function explores the graph through allowed paths only: this path
    can't go through subsystem i or j but must start in i and end in j.
    The length of each path found is d.

    :param subsystem_i: Source subsystem
    :param subsystem_j: Destination subsystem
    :param d: Path length desired
    :return: None
    """
    for metabolite_id in self._subsystem_metabolites_id[subsystem_i]:
        # Find metabolites at a distance d from metabolite_id
        ancestors = {}
        frontier = {metabolite_id}
        explored = {metabolite_id}
        for i in range(d):
            new_nodes = set()
            for current_node in frontier:
                for new_node in set(self._graph.adj[current_node]):
                    # is_node_allowed enforces the i/j membership rules
                    # and prevents revisiting explored nodes
                    if self.is_node_allowed(new_node, i, explored, subsystem_i, subsystem_j, d):
                        new_nodes.add(new_node)
                        # new_node can already be in ancestors if there are 2 paths of same
                        # length to it
                        if new_node in ancestors:
                            ancestors[new_node].append(current_node)
                        else:
                            ancestors[new_node] = [current_node]
            explored = explored.union(new_nodes)
            frontier = new_nodes
        # Handle d = 0 case, since it didn't go through the loop
        # (note: {} is an empty dict, not a set — it only needs to be
        # an empty iterable for the loop below)
        if d == 0 and metabolite_id not in self._subsystem_metabolites_id[subsystem_j]:
            frontier = {}
        # Retrieve and save metabolites, reactions and paths
        for node in frontier:
            paths = self.retrieve_all_paths(node, metabolite_id, ancestors)
            self._intermediate_paths[subsystem_i][subsystem_j][d] = \
                self._intermediate_paths[subsystem_i][subsystem_j][d].union(set(paths))
            self.retrieve_intermediate_metabolites_and_reactions(paths, subsystem_i,
                                                                 subsystem_j, d)
def is_node_allowed(self, node, i, explored, subsystem_i, subsystem_j, d):
"""
Checks whether or not a metabolite is allowed for the current path.
The new node is added if it is not already explored, if it is not in the source subsystem,
and if it is not in the destination subsystem, except if it is the last round
of exploration
:param node: Metabolite id
:param i: Current step
:param explored: Explored node for this path
:param subsystem_i: Source subsystem
:param subsystem_j: Destination subsystem
:param d: Path length desired
:return: Boolean answering the question
"""
if node in explored:
return False
if subsystem_i != subsystem_j and node in self._subsystem_metabolites_id[subsystem_i]:
return False
if i < d-1 and node in self._subsystem_metabolites_id[subsystem_j]:
return False
if i == d-1 and node not in self._subsystem_metabolites_id[subsystem_j]:
return False
return True
def retrieve_all_paths(self, dest_node, src_node, ancestors, init_dict=True):
"""
Retrieves all paths between a source metabolite and a destination metabolite after a
breadth first search.
This function is a recursive function, which makes use of dynamic programming to reduce
its complexity. It uses self._path_dict to store already computed data.
:param dest_node: Destination metabolite
:param src_node: Source metabolite
:param ancestors: Dictionary with ancestors found during the search
:param init_dict: Boolean, for function initialisation
:return: A list of all paths as tuples
"""
if init_dict:
self._path_dict = {}
if dest_node == src_node:
self._path_dict[dest_node] = [(src_node,)]
if dest_node not in self._path_dict:
new_paths = []
for previous_node in ancestors[dest_node]:
for path in self.retrieve_all_paths(previous_node, src_node, ancestors, False):
new_paths.append(path + (dest_node,))
self._path_dict[dest_node] = new_paths
return self._path_dict[dest_node]
def retrieve_intermediate_metabolites_and_reactions(self, paths, subsystem_i, subsystem_j, d):
"""
Retrieves and stores intermediate metabolites and reactions (i.e. M_{i,j}, R_{i,j},
M_{i,i} and R_{i,i}).
This function adds all reactions contained in these paths, and all metabolites between
:param paths: List of paths between subsystems
:param subsystem_i: Source subsystem
:param subsystem_j: Destination subsystem
:param d: Path length
:return: None
"""
for path in paths:
for i in range(len(path)-1):
reaction = self._graph[path[i]][path[i+1]]['rxn_id']
self._intermediate_reactions_id[subsystem_i][subsystem_j][d].add(reaction)
if i > 0:
self._intermediate_metabolites_id[subsystem_i][subsystem_j][d].add(path[i])
def find_min_distance_between_subsystems(self):
"""
Find minimal distance between each subsystems in both directions
:return: Dict with distances
"""
for i in self._core_subsystems:
for j in self._core_subsystems:
for k in range(self._d+1):
# If there path of length d
if self._intermediate_paths[i][j][k]:
self._min_distance_sub_to_sub[i][j] = k
break
# If min distance os not found, then
if self._min_distance_sub_to_sub[i][j] == -1:
pass
return self._min_distance_sub_to_sub
def breadth_search_extracellular_system_paths(self, subsystem, n):
"""
Breadth first search from each metabolite in the extracellular system with special stop
conditions during exploration for paths of length n.
This function explores the graph through allowed paths only : this path can't go through
the extracellular system or the subsystem but must start in the extracellular system and
end in the subsystem. The length of each path found is n.
:param subsystem: Destination subsystem
:param n: Path length desired
:return: None
"""
for metabolite_id in self._extracellular_system:
# Find metabolites at a distance n from metabolite_id
if metabolite_id not in self._graph:
continue
ancestors = {}
frontier = {metabolite_id}
explored = {metabolite_id}
for i in range(n):
new_nodes = set()
for current_node in frontier:
for new_node in set(self._graph.adj[current_node]):
if self.is_node_allowed_extracellular(new_node, i, explored, subsystem, n):
new_nodes.add(new_node)
# new_node can already be in ancestors if there are 2 paths of same
# length to it
if new_node in ancestors:
ancestors[new_node].append(current_node)
else:
ancestors[new_node] = [current_node]
explored = explored.union(new_nodes)
frontier = new_nodes
# Handle n = 0 case, since it didn't go through the loop
if n == 0 and metabolite_id not in self._subsystem_metabolites_id[subsystem]:
frontier = {}
# Retrieve and save metabolites, reactions and paths
for node in frontier:
paths = self.retrieve_all_paths(node, metabolite_id, ancestors)
| |
= await self.publicGetMarketsIdTrades(self.extend(request, params))
#
# [
# {
# "id":251199246,
# "side":2,
# "price":0.022044,
# "executedAt":1588830682664,
# "qty":0.13545846,
# "makerId":"67ed6ef3-33d8-4389-ba70-5c68d9db9f6c",
# "takerId":"229ef0d6-fe67-4b5d-9733-824142fab8f3"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def fetch_order_book(self, symbol, limit=None, params={}):
    """Fetch an order book snapshot for *symbol* and parse it."""
    await self.load_markets()
    request = {'id': self.market_id(symbol)}
    response = await self.marketsGetIdOrderBook(self.extend(request, params))
    #
    # {
    #     "type":"ob_snapshot",
    #     "marketId":3,
    #     "timestamp":1588836429847,
    #     "bids":[[0.021694,8.8793688,1], ...],   # price, amount, count
    #     "asks":[[0.02305,8.8793688,1], ...]
    # }
    #
    timestamp = self.safe_integer(response, 'timestamp')
    return self.parse_order_book(response, timestamp)
def parse_ohlcv(self, ohlcv, market=None):
    """Convert one exchange candle dict into the unified OHLCV list.

    Input shape:
    {"time":1588807500000,"open":...,"high":...,"low":...,"close":...,"volume":...}
    """
    candle = [self.safe_integer(ohlcv, 'time')]
    for key in ('open', 'high', 'low', 'close', 'volume'):
        candle.append(self.safe_float(ohlcv, key))
    return candle
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
    """Fetch candles; the endpoint needs an explicit [start, end] window,
    derived from `since` and/or `limit`."""
    request = {'interval': self.timeframes[timeframe]}
    duration = self.parse_timeframe(timeframe)
    now = self.milliseconds()
    if since is not None:
        request['start'] = since
        # window ends either "now" or after `limit` candles
        if limit is None:
            request['end'] = now
        else:
            request['end'] = self.sum(since, duration * limit * 1000)
    elif limit is not None:
        # no `since`: take the last `limit` candles up to now
        request['start'] = now - duration * limit * 1000
        request['end'] = now
    else:
        raise ArgumentsRequired(self.id + ' fetchOHLCV requires a since argument, or a limit argument, or both')
    await self.load_markets()
    market = self.market(symbol)
    request['id'] = market['id']
    response = await self.publicGetMarketsIdOhlcv(self.extend(request, params))
    #
    # [
    #     {"time":1588807500000,"open":0.022077,"high":0.022077,"low":0.022051,"close":0.022051,"volume":10.532025119999997},
    #     ...
    # ]
    #
    return self.parse_ohlcvs(response, market, timeframe, since, limit)
async def fetch_balance(self, params={}):
    """Fetch account balances and map them into the unified structure."""
    await self.load_markets()
    request = {'id': self.uid}
    response = await self.privateGetAccountsIdBalances(self.extend(request, params))
    #
    # [
    #     {"assetId":"USDT","available":"25","balance":"25","reserved":"0",
    #      "balanceBtc":"0.0","balanceRef":"0.0"}
    # ]
    #
    result = {'info': response}
    for balance in response:
        code = self.safe_currency_code(self.safe_string(balance, 'assetId'))
        result[code] = {
            'free': self.safe_float(balance, 'available'),
            'used': self.safe_float(balance, 'reserved'),
            'total': self.safe_float(balance, 'balance'),
        }
    return self.parse_balance(result)
async def fetch_order(self, id, symbol=None, params={}):
    """Fetch a single order by its exchange-assigned id."""
    await self.load_markets()
    response = await self.privateGetOrdersId(self.extend({'id': id}, params))
    # Response is a single order dict; see parse_order for the field list
    # ("id", "marketId", "type", "side", "qty", "state", "closeReason", ...)
    return self.parse_order(response)
def parse_order_status(self, status):
    # Map both the numeric `state` field and the textual `closeReason`
    # field onto unified ccxt statuses; unknown values map to None.
    statuses = {
        '1': None,  # pending
        '2': 'open',  # open
        '3': 'open',  # partially filled
        '4': 'closed',  # closed
        'FILLED': 'closed',
        'USER_REQUESTED_CANCEL': 'canceled',
        'ADMINISTRATIVE_CANCEL': 'canceled',
        'NOT_ENOUGH_LIQUIDITY': 'canceled',
        'EXPIRED': 'expired',
        'ONE_CANCELS_OTHER': 'canceled',
    }
    return self.safe_string(statuses, status)
def parse_order(self, order, market=None):
    """Map an exchange order dict onto the unified ccxt order structure.

    Seen from fetchOrder/fetchOpenOrders/fetchClosedOrders and
    createOrder responses; market orders may omit qty/limitPrice.
    """
    #
    # fetchOrder, fetchOpenOrders, fetchClosedOrders
    #
    #     {
    #         "id": "30a2b5d0-be2e-4d0a-93ed-a7c45fed1792",
    #         "accountId": "30a2b5d0-be2e-4d0a-93ed-a7c45fed1792",
    #         "marketId": 123,
    #         "type": 1,
    #         "side": 1,
    #         "qty": "1.23456",
    #         "cost": "1.23456",
    #         "remainingQty": "1.23456",
    #         "remainingCost": "1.23456",
    #         "limitPrice": "1.23456",
    #         "stopPrice": "1.23456",
    #         "postOnly": False,
    #         "timeInForce": "GTC",
    #         "state": 1,
    #         "closeReason": "FILLED",
    #         "placedAt": 1556355722341,
    #         "closedAt": 1556355722341
    #     }
    #
    # createOrder (market buy) omits qty/limitPrice:
    #
    #     {"id":"...", "accountId":"...", "marketId":33, "type":1, "side":1,
    #      "cost":"25", "postOnly":false, "timeInForce":"GTC", "state":1,
    #      "placedAt":1589510846735}
    #
    # createOrder (market sell, limit buy, limit sell):
    #
    #     {"id":"...", "accountId":"...", "marketId":33, "type":2, "side":1,
    #      "qty":"1000", "limitPrice":"100", "postOnly":false,
    #      "timeInForce":"GTC", "state":1, "placedAt":1589403938682}
    #
    id = self.safe_string(order, 'id')
    timestamp = self.safe_integer(order, 'placedAt')
    marketId = self.safe_integer(order, 'marketId')
    # prefer the market resolved from the response over the argument
    if marketId in self.markets_by_id:
        market = self.markets_by_id[marketId]
    symbol = None
    if market is not None:
        symbol = market['symbol']
    status = self.parse_order_status(self.safe_string(order, 'state'))
    # a 'closed' state is refined by the closeReason (canceled/expired/...)
    if status == 'closed':
        status = self.parse_order_status(self.safe_string(order, 'closeReason'))
    orderSide = self.safe_string(order, 'side')
    side = 'buy' if (orderSide == '1') else 'sell'
    orderType = self.safe_string(order, 'type')
    type = None
    if orderType == '1':
        type = 'market'
    elif orderType == '2':
        type = 'limit'
    elif orderType == '3':
        type = 'stopmarket'
    else:
        type = 'stoplimit'
    price = self.safe_float(order, 'limitPrice')
    amount = self.safe_float(order, 'qty')
    remaining = self.safe_float(order, 'remainingQty')
    filled = None
    remainingCost = self.safe_float(order, 'remainingCost')
    # a fully spent cost means nothing remains, even if remainingQty is absent
    if (remainingCost is not None) and (remainingCost == 0.0):
        remaining = 0
    if (amount is not None) and (remaining is not None):
        filled = max(0, amount - remaining)
    cost = self.safe_float(order, 'cost')
    # market orders carry no limit price; derive an effective price from
    # cost / filled when both are positive
    if type == 'market':
        if price == 0.0:
            if (cost is not None) and (filled is not None):
                if (cost > 0) and (filled > 0):
                    price = cost / filled
    average = None
    if cost is not None:
        if filled:
            average = cost / filled
    return {
        'info': order,
        'id': id,
        'clientOrderId': None,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'lastTradeTimestamp': None,
        'symbol': symbol,
        'type': type,
        'side': side,
        'price': price,
        'amount': amount,
        'cost': cost,
        'average': average,
        'filled': filled,
        'remaining': remaining,
        'status': status,
        'fee': None,
        'trades': None,
    }
async def fetch_orders_by_state(self, state, symbol=None, since=None, limit=None, params={}):
    """Fetch orders in a given state ('ACTIVE'/'INACTIVE') within the
    API's 90-day maximum time range."""
    now = self.milliseconds()
    ninetyDays = 90 * 24 * 60 * 60 * 1000  # 90 days timerange max
    request = {
        'id': self.uid,
        'state': state,
        # 'side': Integer, # 1 = buy, 2 = sell
        # 'offset': 0, # the number of records to skip
    }
    # default window: the most recent 90 days; otherwise 90 days from `since`
    if since is None:
        request['from'] = now - ninetyDays
        request['to'] = now
    else:
        request['from'] = since
        request['to'] = self.sum(since, ninetyDays)
    await self.load_markets()
    market = None
    if symbol is not None:
        market = self.market(symbol)
        request['marketId'] = market['id']
    if limit is not None:
        request['limit'] = limit  # default 50
    response = await self.privateGetAccountsIdOrders(self.extend(request, params))
    #
    # [
    #     {"id":"...", "marketId":123, "type":1, "side":1, "qty":"1.23456",
    #      "cost":"1.23456", "remainingQty":"1.23456", "limitPrice":"1.23456",
    #      "state":1, "closeReason":"FILLED", "placedAt":1556355722341, ...}
    # ]
    #
    return self.parse_orders(response, market, since, limit)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
    # Closed orders live in the exchange's 'INACTIVE' state bucket.
    return await self.fetch_orders_by_state('INACTIVE', symbol, since, limit, params)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
    # Open orders live in the exchange's 'ACTIVE' state bucket.
    return await self.fetch_orders_by_state('ACTIVE', symbol, since, limit, params)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
    """Fetch the account's own fills within the API's 90-day maximum
    time range."""
    now = self.milliseconds()
    ninetyDays = 90 * 24 * 60 * 60 * 1000  # 90 days timerange max
    request = {
        'id': self.uid,
        # 'side': Integer, # 1 = buy, 2 = sell
        # 'offset': 0, # the number of records to skip
    }
    # default window: the most recent 90 days; otherwise 90 days from `since`
    if since is None:
        request['from'] = now - ninetyDays
        request['to'] = now
    else:
        request['from'] = since
        request['to'] = self.sum(since, ninetyDays)
    await self.load_markets()
    market = None
    if symbol is not None:
        market = self.market(symbol)
        request['marketId'] = market['id']
    if limit is not None:
        request['limit'] = limit  # default 50, max 200
    response = await self.privateGetAccountsIdFills(self.extend(request, params))
    #
    # [
    #     {"id":123, "marketId":123, "side":1, "qty":"1.23456",
    #      "price":"1.23456", "cost":"1.23456", "fee":"1.23456",
    #      "feeAsset":"XBASE", "liquidity":1, "orderId":"...",
    #      "tradeId":123, "filledAt":1556355722341}
    # ]
    #
    return self.parse_trades(response, market, since, limit)
async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
'id': id,
}
trades = await self.privateGetOrdersIdFills(self.extend(request, params))
return self.parse_trades(trades)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
uppercaseType = type.upper()
if uppercaseType == 'MARKET':
type = 1
elif uppercaseType == 'LIMIT':
type = 2
elif uppercaseType == 'STOPMARKET':
type = 3
elif uppercaseType == 'STOPLIMIT':
type = 4
uppercaseSide = side.upper()
side = uppercaseSide == 1 if 'BUY' else 2
request = {
'accountId': self.uid,
'marketId': market['id'],
'type': type,
'side': side,
# 'postOnly': False,
# 'timeInForce': 'GTC',
}
clientOrderId = self.safe_value_2(params, 'refId', 'clientOrderId')
query = params
if clientOrderId is not None:
request['refId'] = clientOrderId
query = self.omit(params, ['refId', 'clientOrderId'])
if | |
# repository: ridicolos/featuretools
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean
from featuretools import primitives
from featuretools.entityset.relationship import Relationship, RelationshipPath
from featuretools.entityset.timedelta import Timedelta
from featuretools.feature_base.utils import is_valid_input
from featuretools.primitives.base import (
AggregationPrimitive,
PrimitiveBase,
TransformPrimitive
)
from featuretools.primitives.utils import serialize_primitive
from featuretools.utils.wrangle import (
_check_time_against_column,
_check_timedelta
)
_ES_REF = {}
class FeatureBase(object):
    """Common base class for all feature types.

    Provides naming, dependency traversal, input-type validation, output schema
    inference, serialization hooks, and the operator overloads that build new
    features out of existing ones.
    """

    def __init__(self, dataframe, base_features, relationship_path, primitive, name=None, names=None):
        """Base class for all features

        Args:
            dataframe (DataFrame): dataframe for calculating this feature
            base_features (list[FeatureBase]): list of base features for primitive
            relationship_path (RelationshipPath): path from this dataframe to the
                dataframe of the base features.
            primitive (:class:`.PrimitiveBase`): primitive to calculate. if not initialized when passed, gets initialized with no arguments
            name (str, optional): name to use instead of the auto-generated one
            names (list[str], optional): output column names for a multi-output feature
        """
        assert all(isinstance(f, FeatureBase) for f in base_features), \
            "All base features must be features"
        self.dataframe_name = dataframe.ww.name
        # resolve the owning EntitySet through the module-level registry
        self.entityset = _ES_REF[dataframe.ww.metadata['entityset_id']]
        self.base_features = base_features
        # initialize if not already initialized
        if not isinstance(primitive, PrimitiveBase):
            primitive = primitive()
        self.primitive = primitive
        self.relationship_path = relationship_path
        self._name = name
        self._names = names
        assert self._check_input_types(), ("Provided inputs don't match input "
                                           "type requirements")

    def __getitem__(self, key):
        """Return a slice selecting output number `key` of a multi-output feature."""
        assert self.number_output_features > 1, \
            'can only access slice of multi-output feature'
        assert self.number_output_features > key, \
            'index is higher than the number of outputs'
        return FeatureOutputSlice(self, key)

    @classmethod
    def from_dictionary(cls, arguments, entityset, dependencies, primitives_deserializer):
        raise NotImplementedError("Must define from_dictionary on FeatureBase subclass")

    def rename(self, name):
        """Rename Feature, returns copy"""
        feature_copy = self.copy()
        feature_copy._name = name
        # drop cached per-output names so they are regenerated from the new name
        feature_copy._names = None
        return feature_copy

    def copy(self):
        raise NotImplementedError("Must define copy on FeatureBase subclass")

    def get_name(self):
        """Return the feature's name, generating and caching it on first access."""
        if not self._name:
            self._name = self.generate_name()
        return self._name

    def get_feature_names(self):
        """Return the list of output column names (one per output)."""
        if not self._names:
            if self.number_output_features == 1:
                self._names = [self.get_name()]
            else:
                self._names = self.generate_names()
                # a custom name overrides generated ones: suffix with the output index
                if self.get_name() != self.generate_name():
                    self._names = [self.get_name() + '[{}]'.format(i) for i in range(len(self._names))]
        return self._names

    def get_function(self, **kwargs):
        return self.primitive.get_function(**kwargs)

    def get_dependencies(self, deep=False, ignored=None, copy=True):
        """Returns features that are used to calculate this feature

        ..note::

            If you only want the features that make up the input to the feature
            function use the base_features attribute instead.

        Args:
            deep (bool): if True, also collect transitive dependencies
            ignored (set[str]): unique names of features to exclude
            copy (bool): unused; retained for backward compatibility
        """
        deps = list(self.base_features)
        if hasattr(self, "where") and self.where:
            deps.append(self.where)
        if ignored is None:
            ignored = set()
        deps = [d for d in deps if d.unique_name() not in ignored]
        if deep:
            for dep in deps[:]:  # copy so we don't modify list we iterate over
                deps += dep.get_dependencies(deep, ignored)
        return deps

    def get_depth(self, stop_at=None):
        """Returns depth of feature"""
        max_depth = 0
        stop_at_set = set()
        if stop_at is not None:
            stop_at_set = set([i.unique_name() for i in stop_at])
            if self.unique_name() in stop_at_set:
                return 0
        for dep in self.get_dependencies(deep=True, ignored=stop_at_set):
            max_depth = max(dep.get_depth(stop_at=stop_at),
                            max_depth)
        return max_depth + 1

    def _check_input_types(self):
        """Return True when the base features satisfy at least one of the
        primitive's accepted input signatures (or there is nothing to check)."""
        if len(self.base_features) == 0:
            return True
        input_types = self.primitive.input_types
        if input_types is None:
            return True
        # a primitive may declare several alternative signatures; normalize to list-of-lists
        if type(input_types[0]) != list:
            input_types = [input_types]
        for signature in input_types:
            # zip truncates to the shorter sequence, mirroring the original behavior
            pairs = list(zip(signature, self.base_features))
            if all([is_valid_input(feature.column_schema, expected) for expected, feature in pairs]):
                return True
        return False

    @property
    def dataframe(self):
        """Dataframe this feature belongs to"""
        return self.entityset[self.dataframe_name]

    @property
    def number_output_features(self):
        return self.primitive.number_output_features

    def __repr__(self):
        return "<Feature: %s>" % (self.get_name())

    def hash(self):
        # include the dataframe name so identically-named features on
        # different dataframes hash differently
        return hash(self.get_name() + self.dataframe_name)

    def __hash__(self):
        return self.hash()

    @property
    def column_schema(self):
        """Woodwork schema describing this feature's output column.

        When the primitive declares no return type, fall back (recursively) to
        the first base feature's schema, stripping index/time-index/foreign-key
        tags that must not propagate past the original column.
        """
        feature = self
        column_schema = self.primitive.return_type
        while column_schema is None:
            # get column_schema of first base feature
            base_feature = feature.base_features[0]
            column_schema = base_feature.column_schema
            # only the original time index should exist
            # so make this feature's return type just a Datetime
            if 'time_index' in column_schema.semantic_tags:
                column_schema = ColumnSchema(logical_type=column_schema.logical_type,
                                             semantic_tags=column_schema.semantic_tags - {"time_index"})
            elif 'index' in column_schema.semantic_tags:
                column_schema = ColumnSchema(logical_type=column_schema.logical_type,
                                             semantic_tags=column_schema.semantic_tags - {"index"})
                # Need to add back in the numeric standard tag so the schema can get recognized
                # as a valid return type
                if column_schema.is_numeric:
                    column_schema.semantic_tags.add('numeric')
                if column_schema.is_categorical:
                    column_schema.semantic_tags.add('category')
            # direct features should keep the foreign key tag, but all other features should get converted
            if not isinstance(feature, DirectFeature) and 'foreign_key' in column_schema.semantic_tags:
                column_schema = ColumnSchema(logical_type=column_schema.logical_type,
                                             semantic_tags=column_schema.semantic_tags - {"foreign_key"})
            feature = base_feature
        return column_schema

    @property
    def default_value(self):
        return self.primitive.default_value

    def get_arguments(self):
        raise NotImplementedError("Must define get_arguments on FeatureBase subclass")

    def to_dictionary(self):
        """Serialize the feature to a JSON-friendly dict."""
        return {
            'type': type(self).__name__,
            'dependencies': [dep.unique_name() for dep in self.get_dependencies()],
            'arguments': self.get_arguments(),
        }

    def _handle_binary_comparision(self, other, Primitive, PrimitiveScalar):
        # NOTE: the historical misspelling of "comparison" is kept for
        # backward compatibility with any external callers/subclasses.
        # Dispatch to the feature-vs-feature primitive or the scalar variant.
        if isinstance(other, FeatureBase):
            return Feature([self, other], primitive=Primitive)
        return Feature([self], primitive=PrimitiveScalar(other))

    def __eq__(self, other):
        """Compares to other by equality"""
        return self._handle_binary_comparision(other, primitives.Equal, primitives.EqualScalar)

    def __ne__(self, other):
        """Compares to other by non-equality"""
        return self._handle_binary_comparision(other, primitives.NotEqual, primitives.NotEqualScalar)

    def __gt__(self, other):
        """Compares if greater than other"""
        return self._handle_binary_comparision(other, primitives.GreaterThan, primitives.GreaterThanScalar)

    def __ge__(self, other):
        """Compares if greater than or equal to other"""
        return self._handle_binary_comparision(other, primitives.GreaterThanEqualTo, primitives.GreaterThanEqualToScalar)

    def __lt__(self, other):
        """Compares if less than other"""
        return self._handle_binary_comparision(other, primitives.LessThan, primitives.LessThanScalar)

    def __le__(self, other):
        """Compares if less than or equal to other"""
        return self._handle_binary_comparision(other, primitives.LessThanEqualTo, primitives.LessThanEqualToScalar)

    def __add__(self, other):
        """Add other"""
        return self._handle_binary_comparision(other, primitives.AddNumeric, primitives.AddNumericScalar)

    def __radd__(self, other):
        return self.__add__(other)

    def __sub__(self, other):
        """Subtract other"""
        return self._handle_binary_comparision(other, primitives.SubtractNumeric, primitives.SubtractNumericScalar)

    def __rsub__(self, other):
        return Feature([self], primitive=primitives.ScalarSubtractNumericFeature(other))

    def __div__(self, other):
        """Divide by other"""
        return self._handle_binary_comparision(other, primitives.DivideNumeric, primitives.DivideNumericScalar)

    def __truediv__(self, other):
        return self.__div__(other)

    def __rtruediv__(self, other):
        return self.__rdiv__(other)

    def __rdiv__(self, other):
        return Feature([self], primitive=primitives.DivideByFeature(other))

    def __mul__(self, other):
        """Multiply by other"""
        if isinstance(other, FeatureBase):
            # two boolean features get the dedicated boolean multiply primitive
            if all([isinstance(f.column_schema.logical_type, Boolean)
                    for f in (self, other)]):
                return Feature([self, other], primitive=primitives.MultiplyBoolean)
        return self._handle_binary_comparision(other, primitives.MultiplyNumeric, primitives.MultiplyNumericScalar)

    def __rmul__(self, other):
        return self.__mul__(other)

    def __mod__(self, other):
        """Take modulus of other"""
        return self._handle_binary_comparision(other, primitives.ModuloNumeric, primitives.ModuloNumericScalar)

    def __rmod__(self, other):
        return Feature([self], primitive=primitives.ModuloByFeature(other))

    def __and__(self, other):
        return self.AND(other)

    def __rand__(self, other):
        return Feature([other, self], primitive=primitives.And)

    def __or__(self, other):
        return self.OR(other)

    def __ror__(self, other):
        return Feature([other, self], primitive=primitives.Or)

    def __not__(self, other=None):
        # Bug fix: this previously forwarded `other` to NOT(), which takes no
        # arguments, so every call raised TypeError. `other` is kept (with a
        # default) only for signature backward-compatibility and is ignored.
        return self.NOT()

    def __abs__(self):
        return Feature([self], primitive=primitives.Absolute)

    def __neg__(self):
        return Feature([self], primitive=primitives.Negate)

    def AND(self, other_feature):
        """Logical AND with other_feature"""
        return Feature([self, other_feature], primitive=primitives.And)

    def OR(self, other_feature):
        """Logical OR with other_feature"""
        return Feature([self, other_feature], primitive=primitives.Or)

    def NOT(self):
        """Creates inverse of feature"""
        return Feature([self], primitive=primitives.Not)

    def isin(self, list_of_output):
        """Whether the feature value is a member of the given list."""
        return Feature([self], primitive=primitives.IsIn(list_of_outputs=list_of_output))

    def is_null(self):
        """Compares feature to null by equality"""
        return Feature([self], primitive=primitives.IsNull)

    def __invert__(self):
        return self.NOT()

    def unique_name(self):
        """Name qualified by the dataframe, unique across the entityset."""
        return u"%s: %s" % (self.dataframe_name, self.get_name())

    def relationship_path_name(self):
        return self.relationship_path.name
class IdentityFeature(FeatureBase):
    """Feature that passes a dataframe column through unchanged."""

    def __init__(self, column, name=None):
        self.column_name = column.ww.name
        self.return_type = column.ww.schema
        # locate the owning entityset and dataframe via the schema metadata
        schema_metadata = column.ww.schema._metadata
        owning_es = _ES_REF[schema_metadata['entityset_id']]
        super().__init__(dataframe=owning_es[schema_metadata['dataframe_name']],
                         base_features=[],
                         relationship_path=RelationshipPath([]),
                         primitive=PrimitiveBase,
                         name=name)

    @classmethod
    def from_dictionary(cls, arguments, entityset, dependencies, primitives_deserializer):
        """Rebuild an IdentityFeature from its serialized arguments."""
        source_column = entityset[arguments['dataframe_name']].ww[arguments['column_name']]
        return cls(column=source_column, name=arguments['name'])

    def copy(self):
        """Return copy of feature"""
        source_column = self.entityset[self.dataframe_name].ww[self.column_name]
        return IdentityFeature(source_column)

    def generate_name(self):
        # an identity feature is named after the column it wraps
        return self.column_name

    def get_depth(self, stop_at=None):
        # identity features sit at the bottom of every feature stack
        return 0

    def get_arguments(self):
        """Serialize constructor arguments for to_dictionary()."""
        return {
            'name': self._name,
            'column_name': self.column_name,
            'dataframe_name': self.dataframe_name,
        }

    @property
    def column_schema(self):
        return self.return_type
class DirectFeature(FeatureBase):
    """Feature for child dataframe that inherits
    a feature value from a parent dataframe"""
    # accepts any single column; the output schema mirrors the base feature
    input_types = [ColumnSchema()]
    return_type = None

    def __init__(self, base_feature, child_dataframe_name, relationship=None, name=None):
        """
        Args:
            base_feature (FeatureBase): feature defined on the parent dataframe
            child_dataframe_name (str): name of the child dataframe that inherits the value
            relationship (Relationship, optional): which parent-child relationship
                to follow; required when more than one path to the parent exists
            name (str, optional): override for the generated feature name
        """
        base_feature = _validate_base_features(base_feature)[0]
        self.parent_dataframe_name = base_feature.dataframe_name
        relationship = self._handle_relationship(base_feature.entityset, child_dataframe_name, relationship)
        child_dataframe = base_feature.entityset[child_dataframe_name]
        super(DirectFeature, self).__init__(dataframe=child_dataframe,
                                            base_features=[base_feature],
                                            relationship_path=RelationshipPath([(True, relationship)]),
                                            primitive=PrimitiveBase,
                                            name=name)

    def _handle_relationship(self, entityset, child_dataframe_name, relationship):
        """Validate an explicit relationship, or resolve the single forward
        relationship from the child dataframe to the parent dataframe.

        Raises RuntimeError when no relationship exists or when the choice is
        ambiguous (more than one forward relationship to the same parent).
        """
        child_dataframe = entityset[child_dataframe_name]
        if relationship:
            relationship_child = relationship.child_dataframe
            assert child_dataframe.ww.name == relationship_child.ww.name, \
                'child_dataframe must be the relationship child dataframe'
            assert self.parent_dataframe_name == relationship.parent_dataframe.ww.name, \
                'Base feature must be defined on the relationship parent dataframe'
        else:
            child_relationships = entityset.get_forward_relationships(child_dataframe.ww.name)
            possible_relationships = (r for r in child_relationships
                                      if r.parent_dataframe.ww.name == self.parent_dataframe_name)
            # take the first candidate; a second successful next() below means ambiguity
            relationship = next(possible_relationships, None)
            if not relationship:
                raise RuntimeError('No relationship from "%s" to "%s" found.'
                                   % (child_dataframe.ww.name, self.parent_dataframe_name))
            # Check for another path.
            elif next(possible_relationships, None):
                message = "There are multiple relationships to the base dataframe. " \
                          "You must specify a relationship."
                raise RuntimeError(message)
        return relationship

    @classmethod
    def from_dictionary(cls, arguments, entityset, dependencies, primitives_deserializer):
        """Rebuild a DirectFeature from serialized arguments and resolved dependencies."""
        base_feature = dependencies[arguments['base_feature']]
        relationship = Relationship.from_dictionary(arguments['relationship'], entityset)
        child_dataframe_name = relationship.child_dataframe.ww.name
        return cls(base_feature=base_feature,
                   child_dataframe_name=child_dataframe_name,
                   relationship=relationship,
                   name=arguments['name'])

    @property
    def number_output_features(self):
        # mirrors the base feature: a direct feature adds no outputs of its own
        return self.base_features[0].number_output_features

    @property
    def default_value(self):
        return self.base_features[0].default_value

    def copy(self):
        """Return copy of feature"""
        _is_forward, relationship = self.relationship_path[0]
        return DirectFeature(self.base_features[0], self.dataframe_name,
                             relationship=relationship)

    @property
    def column_schema(self):
        # inherits the schema of the parent feature unchanged
        return self.base_features[0].column_schema

    def generate_name(self):
        return self._name_from_base(self.base_features[0].get_name())

    def generate_names(self):
        return [self._name_from_base(base_name)
                for base_name in self.base_features[0].get_feature_names()]

    def get_arguments(self):
        """Serialize constructor arguments for to_dictionary()."""
        _is_forward, relationship = self.relationship_path[0]
        return {
            'name': self._name,
            'base_feature': self.base_features[0].unique_name(),
            'relationship': relationship.to_dictionary(),
        }

    def _name_from_base(self, base_name):
        # prefix the base feature's name with the relationship path name
        return u"%s.%s" % (self.relationship_path_name(), base_name)
class AggregationFeature(FeatureBase):
# Feature to condition this feature | |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import os
from dataclasses import dataclass
from typing import Any, Collection, List, Optional, Tuple, Union
from weakref import proxy
from torch.utils.data import DataLoader, RandomSampler, Sampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
import pytorch_lightning as pl
from pytorch_lightning.overrides.distributed import UnrepeatedDistributedSampler
from pytorch_lightning.strategies import DDPSpawnStrategy
from pytorch_lightning.trainer.states import RunningStage, TrainerFn
from pytorch_lightning.trainer.supporters import CombinedLoader, CycleIterator
from pytorch_lightning.utilities.apply_func import apply_to_collection
from pytorch_lightning.utilities.auto_restart import _validate_fault_tolerant_automatic
from pytorch_lightning.utilities.data import (
_auto_add_worker_init_fn,
_is_dataloader_shuffled,
_replace_dataloader_init_method,
_update_dataloader,
has_iterable_dataset,
has_len_all_ranks,
)
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _fault_tolerant_training
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_warn
from pytorch_lightning.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS
from pytorch_lightning.utilities.warnings import PossibleUserWarning
class DataConnector:
def __init__(self, trainer: "pl.Trainer", multiple_trainloader_mode: str = "max_size_cycle"):
self.trainer = trainer
self.multiple_trainloader_mode = multiple_trainloader_mode
self._train_dataloader_source = _DataLoaderSource(None, "")
self._val_dataloader_source = _DataLoaderSource(None, "")
self._test_dataloader_source = _DataLoaderSource(None, "")
self._predict_dataloader_source = _DataLoaderSource(None, "")
@property
def _should_reload_train_dl(self) -> bool:
"""Check if train dataloader should be reloaded."""
n_epochs = self.trainer.reload_dataloaders_every_n_epochs
return n_epochs and (self.trainer.current_epoch - self.trainer._last_train_dl_reload_epoch >= n_epochs)
@property
def _should_reload_val_dl(self) -> bool:
"""Check if validation dataloader should be reloaded."""
n_epochs = self.trainer.reload_dataloaders_every_n_epochs
return n_epochs and (self.trainer.current_epoch - self.trainer._last_val_dl_reload_epoch >= n_epochs)
    def on_trainer_init(
        self,
        check_val_every_n_epoch: int,
        reload_dataloaders_every_n_epochs: int,
        prepare_data_per_node: Optional[bool] = None,
    ) -> None:
        """Validate data-related Trainer flags at construction time and store them
        on the trainer.

        Raises:
            MisconfigurationException: if either flag has an invalid type/value.
        """
        self.trainer.datamodule = None
        # trainer-level `prepare_data_per_node` is deprecated in favor of the
        # DataModule/LightningModule property; warn but still honor it
        if prepare_data_per_node is not None:
            rank_zero_deprecation(
                "Setting `prepare_data_per_node` with the trainer flag is deprecated in v1.5.0 and will be removed in"
                " v1.7.0. Please set `prepare_data_per_node` in `LightningDataModule` and/or `LightningModule`"
                " directly instead."
            )
        self.trainer.prepare_data_per_node = prepare_data_per_node
        if not isinstance(check_val_every_n_epoch, int):
            raise MisconfigurationException(
                f"check_val_every_n_epoch should be an integer. Found {check_val_every_n_epoch}"
            )
        self.trainer.check_val_every_n_epoch = check_val_every_n_epoch
        # the reload interval must be a non-negative int; 0 disables reloading
        if not isinstance(reload_dataloaders_every_n_epochs, int) or (reload_dataloaders_every_n_epochs < 0):
            raise MisconfigurationException(
                f"`reload_dataloaders_every_n_epochs` should be an int >= 0, got {reload_dataloaders_every_n_epochs}."
            )
        self.trainer.reload_dataloaders_every_n_epochs = reload_dataloaders_every_n_epochs
        self.trainer._is_data_prepared = False
    def prepare_data(self) -> None:
        """Call ``prepare_data`` on the datamodule and/or the LightningModule,
        respecting the `prepare_data_per_node` setting so downloads happen only
        on the appropriate rank(s)."""
        # on multi-gpu jobs we only want to manipulate (download, etc) on node_rank=0, local_rank=0
        # or in the case where each node needs to do its own manipulation in which case just local_rank=0
        local_rank_zero = self.trainer.local_rank == 0
        global_rank_zero = self.trainer.local_rank == 0 and self.trainer.node_rank == 0
        datamodule = self.trainer.datamodule
        lightning_module = self.trainer.lightning_module
        # handle datamodule prepare data:
        # check for prepare_data_per_node & datamodule lifecycle properties before calling datamodule.prepare_data
        if datamodule is not None:
            dm_prepare_data_per_node = datamodule.prepare_data_per_node
            dm_eq_prepare_data = datamodule.prepare_data_per_node == self.trainer.prepare_data_per_node
            # a trainer flag that disagrees with the datamodule property is a user error
            if self.trainer.prepare_data_per_node is not None and not dm_eq_prepare_data:
                raise MisconfigurationException(
                    "Inconsistent settings found for `prepare_data_per_node`."
                    f" Value was set with both `Trainer(prepare_data_per_node={self.trainer.prepare_data_per_node}.)`"
                    f" and `DataModule.prepare_data_per_node={datamodule.prepare_data_per_node}`."
                    " Move `prepare_data_per_node` setting to DataModule property."
                )
            if (dm_prepare_data_per_node and local_rank_zero) or (not dm_prepare_data_per_node and global_rank_zero):
                self.trainer.datamodule.prepare_data()
        # handle lightning module prepare data:
        # check for prepare_data_per_node before calling lightning_module.prepare_data
        if lightning_module is not None:
            lm_prepare_data_per_node = lightning_module.prepare_data_per_node
            lm_eq_prepare_data = lightning_module.prepare_data_per_node == self.trainer.prepare_data_per_node
            if (self.trainer.prepare_data_per_node is not None) and not lm_eq_prepare_data:
                raise MisconfigurationException(
                    "Inconsistent settings found for `prepare_data_per_node`."
                    f" Value was set with both `Trainer(prepare_data_per_node={self.trainer.prepare_data_per_node}.)`"
                    f" and `LightningModule.prepare_data_per_node={lightning_module.prepare_data_per_node}`."
                    " Move `prepare_data_per_node` setting to LightningModule property."
                )
            if (lm_prepare_data_per_node and local_rank_zero) or (not lm_prepare_data_per_node and global_rank_zero):
                self.trainer._call_lightning_module_hook("prepare_data")
        self.trainer._is_data_prepared = True
    def attach_data(
        self,
        model: "pl.LightningModule",
        train_dataloaders: Optional[TRAIN_DATALOADERS] = None,
        val_dataloaders: Optional[EVAL_DATALOADERS] = None,
        test_dataloaders: Optional[EVAL_DATALOADERS] = None,
        predict_dataloaders: Optional[EVAL_DATALOADERS] = None,
        datamodule: Optional["pl.LightningDataModule"] = None,
    ) -> None:
        """Attach dataloaders, the datamodule, and trainer-derived properties to
        the model. Explicitly passed dataloaders take precedence over the
        model's hooks; a datamodule (when given) overrides both."""
        # set up the passed in dataloaders (if needed)
        self.attach_dataloaders(
            model,
            train_dataloaders=train_dataloaders,
            val_dataloaders=val_dataloaders,
            test_dataloaders=test_dataloaders,
            predict_dataloaders=predict_dataloaders,
        )
        self.attach_datamodule(model, datamodule=datamodule)
        # set local properties on the model
        self._copy_trainer_model_properties(model)
def _copy_trainer_model_properties(self, model):
ref_model = self.trainer.lightning_module or model
for m in [model, ref_model]:
m.trainer = proxy(self.trainer)
m.use_amp = self.trainer.amp_backend is not None
m.precision = self.trainer.precision
def attach_dataloaders(
self,
model: "pl.LightningModule",
train_dataloaders: Optional[TRAIN_DATALOADERS] = None,
val_dataloaders: Optional[EVAL_DATALOADERS] = None,
test_dataloaders: Optional[EVAL_DATALOADERS] = None,
predict_dataloaders: Optional[EVAL_DATALOADERS] = None,
) -> None:
self.trainer.train_dataloader = None
self.trainer.val_dataloaders = None
self.trainer.test_dataloaders = None
self.trainer.predict_dataloaders = None
self._train_dataloader_source = _DataLoaderSource(
train_dataloaders if train_dataloaders is not None else model, "train_dataloader"
)
self._val_dataloader_source = _DataLoaderSource(
val_dataloaders if val_dataloaders is not None else model, "val_dataloader"
)
self._test_dataloader_source = _DataLoaderSource(
test_dataloaders if test_dataloaders is not None else model, "test_dataloader"
)
self._predict_dataloader_source = _DataLoaderSource(
predict_dataloaders if predict_dataloaders is not None else model, "predict_dataloader"
)
def attach_datamodule(
self, model: "pl.LightningModule", datamodule: Optional["pl.LightningDataModule"] = None
) -> None:
# If we have a datamodule, attach necessary hooks + dataloaders
if datamodule is None:
return
self._train_dataloader_source = _DataLoaderSource(datamodule, "train_dataloader")
self._val_dataloader_source = _DataLoaderSource(datamodule, "val_dataloader")
self._test_dataloader_source = _DataLoaderSource(datamodule, "test_dataloader")
self._predict_dataloader_source = _DataLoaderSource(datamodule, "predict_dataloader")
# Override data transfer hooks if dataset-specific to_device logic has been defined in datamodule
batch_transfer_hooks = ("on_before_batch_transfer", "transfer_batch_to_device", "on_after_batch_transfer")
for hook in batch_transfer_hooks:
if is_overridden(hook, datamodule):
setattr(model, hook, getattr(datamodule, hook))
self.trainer.datamodule = datamodule
datamodule.trainer = self.trainer
# experimental feature for Flash
if hasattr(datamodule, "data_pipeline"):
model.data_pipeline = datamodule.data_pipeline
    def _worker_check(self, dataloader: DataLoader, name: str) -> None:
        """Emit warnings for `num_workers` configurations known to cause
        data-loading bottlenecks, taking the ddp_spawn strategy into account."""
        if not isinstance(dataloader, DataLoader):
            return
        using_spawn = isinstance(self.trainer.strategy, DDPSpawnStrategy)
        num_cpus = multiprocessing.cpu_count()
        # ddp_spawn + num_workers > 0 don't mix! tell the user
        if dataloader.num_workers > 0 and using_spawn:
            # checks for the attr persistent_workers available in pytorch >= 1.7
            if hasattr(dataloader, "persistent_workers"):
                if not dataloader.persistent_workers:
                    rank_zero_warn(
                        "num_workers>0, persistent_workers=False, and strategy=ddp_spawn"
                        " may result in data loading bottlenecks."
                        " Consider setting persistent_workers=True"
                        " (this is a limitation of Python .spawn() and PyTorch)"
                    )
            else:
                rank_zero_warn(
                    "num_workers>0 and strategy=ddp_spawn do not mix well"
                    " and may result in data loading bottlenecks."
                    " Consider setting strategy=ddp to use num_workers>0"
                    " (this is a limitation of Python .spawn() and PyTorch)"
                )
        elif dataloader.num_workers == 0 and using_spawn:
            # checks for the attr persistent_workers available in pytorch >= 1.7
            if hasattr(dataloader, "persistent_workers"):
                if not dataloader.persistent_workers:
                    rank_zero_warn(
                        "strategy=ddp_spawn and num_workers=0 may result in data loading bottlenecks."
                        " Consider setting num_workers>0 and persistent_workers=True"
                    )
            else:
                rank_zero_warn(
                    "strategy=ddp_spawn and num_workers=0 may result in data loading bottlenecks."
                    " Consider setting strategy=ddp and set num_workers>0"
                )
        elif dataloader.num_workers <= 2 < num_cpus and not using_spawn:
            # if changed, update the `filterwarnings` snippet in 'speed.html#num-workers'
            rank_zero_warn(
                f"The dataloader, {name}, does not have many workers which may be a bottleneck."
                " Consider increasing the value of the `num_workers` argument`"
                f" (try {num_cpus} which is the number of cpus on this machine)"
                " in the `DataLoader` init to improve performance.",
                category=PossibleUserWarning,
            )
def _requires_distributed_sampler(self, dataloader) -> bool:
return (
self.trainer._accelerator_connector.replace_sampler_ddp
and self.trainer._accelerator_connector.is_distributed
and not isinstance(dataloader.sampler, DistributedSampler)
and not has_iterable_dataset(dataloader)
)
    # TODO: shuffle here is kept for BC. Remove it once data_loading.py is removed (#11248)
    def _prepare_dataloader(
        self, dataloader: Any, shuffle: Optional[bool] = None, mode: Optional[RunningStage] = None
    ) -> Any:
        """This function handles the following functionalities:

        - Injecting a `DistributedDataSampler` into the `DataLoader` if on a distributed environment
        - Wrapping the datasets and samplers into fault-tolerant components

        Returns the (possibly re-created) dataloader; objects that are not a
        DataLoader/CycleIterator/CombinedLoader pass through untouched.
        """
        if isinstance(dataloader, CombinedLoader):
            # apply `_prepare_dataloader` on all the collection of loaders
            dataloader.loaders = apply_to_collection(
                dataloader.loaders, (DataLoader, CycleIterator), self._prepare_dataloader, shuffle, mode=mode
            )
            # the length need to recomputed across all dataloaders in case of special behavior.
            dataloader._apply_cycle_iterator_length()
            return dataloader
        # don't do anything if it's not a dataloader
        if not isinstance(dataloader, (DataLoader, CycleIterator)):
            return dataloader
        cycle_iterator: Optional[CycleIterator] = None
        if isinstance(dataloader, CycleIterator):
            # unwrap so the inner DataLoader can be rebuilt; re-wrapped below
            cycle_iterator = dataloader
            dataloader = dataloader.loader
        if (
            _fault_tolerant_training()  # injects components to track the state
            or self._requires_distributed_sampler(dataloader)  # sets the distributed sampler
            or mode == RunningStage.PREDICTING  # to track indices for the predictions
            or self.trainer._accelerator_connector.use_ipu  # IPUs use a custom `DataLoader`
        ):
            if shuffle is None:
                # for training, set to True always
                # for evaluation, decide based on existing sampler
                shuffle = True if mode == RunningStage.TRAINING else _is_dataloader_shuffled(dataloader)
            sampler = self._resolve_sampler(dataloader, shuffle=shuffle, mode=mode)
            dataloader = _update_dataloader(dataloader, sampler, mode=mode)
        if cycle_iterator is not None:
            cycle_iterator.loader = dataloader
            return cycle_iterator
        return dataloader
def _resolve_sampler(self, dataloader: DataLoader, shuffle: bool, mode: Optional[RunningStage] = None) -> Sampler:
if self._requires_distributed_sampler(dataloader):
if not | |
# preferences.py
#
# MIT License
#
# Copyright (c) 2020 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import os
import socket
from gi.repository import Granite, Gtk, Gdk
from .config import APP_ID, APP_TITLE
from .config import ApplicationSettings
from .config import (
DEFAULT_AUTOSTART_ENTRY_FILE,
DEFAULT_PREFS_WIDTH,
DEFAULT_PREFS_HEIGHT,
)
from .config import (
SETTINGS_KEY_DOCKER_ENDPOINT,
SETTINGS_KEY_KUBECONFIG,
SETTINGS_KEY_START_ON_LOGIN,
SETTINGS_KEY_REG_ADDRESS,
SETTINGS_KEY_REG_VOL,
SETTINGS_KEY_REG_MODE,
SETTINGS_KEY_CREATE_HOOK,
SETTINGS_KEY_DESTROY_HOOK,
SETTINGS_KEY_K3D_IMAGE,
SETTINGS_KEY_K3S_ARGS,
)
from .docker import is_valid_docker_name, is_valid_docker_host
from .utils import parse_registry, RegistryInvalidError
from .utils_ui import SettingsPage, show_error_dialog, show_warning_dialog
SETTINGS_REG_LOCAL = "Regular registry"
SETTINGS_REG_CACHE = "Only pull-through cache"
###############################################################################
# Errors
###############################################################################
class PreferencesError(Exception):
    """
    A configuration error

    Attributes:
        setting: name/key of the offending preference
        message: human-readable description of the problem
    """

    def __init__(self, setting, message):
        # forward the message to Exception so str(e) and e.args are populated
        # (previously they were empty, making logs and tracebacks useless)
        super().__init__(message)
        self.setting = setting
        self.message = message
class PreferencesWarning(Exception):
    """
    A configuration warning

    Attributes:
        setting: name/key of the offending preference
        message: human-readable description of the problem
    """

    def __init__(self, setting, message):
        # forward the message to Exception so str(e) and e.args are populated
        # (previously they were empty, making logs and tracebacks useless)
        super().__init__(message)
        self.setting = setting
        self.message = message
###############################################################################
# Startup entry
###############################################################################
class K3dvStartupEntry(object):
    """Manages the XDG autostart .desktop entry that launches the app on login."""

    # template of the autostart entry written to DEFAULT_AUTOSTART_ENTRY_FILE
    contents = f"""
[Desktop Entry]
Name=k3d
Exec=flatpak run --user {APP_ID}
Terminal=false
Type=Application
Categories=System;
StartupNotify=true
X-GNOME-Autostart-enabled=true
"""

    def __init__(self):
        super().__init__()
        self._filename = DEFAULT_AUTOSTART_ENTRY_FILE

    def create(self):
        """Write (or replace) the autostart entry on disk."""
        logging.info(f"Creating/over-writting desktop entry file {self._filename}")
        with open(self._filename, "w") as entry_file:
            entry_file.write(self.contents)

    def delete(self):
        """Remove the autostart entry if it exists; silently no-op otherwise."""
        if not os.path.exists(self._filename):
            return
        logging.info(f"Removing desktop entry file {self._filename}")
        os.remove(self._filename)
###############################################################################
# Preferences
###############################################################################
class PreferencesDialog(Gtk.Window):
    """
    Top-level Preferences window.

    Hosts a PreferencesPanedView with all the settings pages and a header
    bar with "Cancel", "Defaults" and "Apply" buttons. Settings writes are
    delayed while the window is shown (see show_all) and only committed
    when "Apply" succeeds, or reverted on "Cancel".
    """

    def __init__(self, docker):
        """
        :param docker: Docker helper passed down to the settings pages
        """
        super().__init__()
        self.set_default_size(DEFAULT_PREFS_WIDTH, DEFAULT_PREFS_HEIGHT)
        self.set_resizable(False)
        self.set_border_width(10)
        self.set_gravity(Gdk.Gravity.CENTER)
        self.set_position(Gtk.WindowPosition.CENTER)
        self._settings = ApplicationSettings(APP_ID)
        self._docker = docker
        # Main content: sidebar + stack of settings pages.
        self.view = PreferencesPanedView(settings=self._settings, docker=self._docker)
        self.add(self.view)
        # Custom header bar: no window close button, so the user must pick
        # either "Cancel" or "Apply".
        self.header = Gtk.HeaderBar()
        self.header.set_show_close_button(False)
        self.header.set_title("Preferences")
        self.header.get_style_context().remove_class("header-bar")
        self.header.get_style_context().add_class("titlebar")
        self.header.get_style_context().add_class("background")
        self.set_titlebar(self.header)
        # add a "Cancel" button
        cancel_button = Gtk.Button(label="Cancel")
        cancel_button.connect("clicked", self.on_cancel_clicked)
        self.header.pack_start(cancel_button)
        # add a "Defaults" button (destructive style: it resets everything)
        defaults_button = Gtk.Button(label="Defaults")
        defaults_button.connect("clicked", self.on_defaults_clicked)
        defaults_button.get_style_context().add_class(
            Gtk.STYLE_CLASS_DESTRUCTIVE_ACTION
        )
        self.header.pack_start(defaults_button)
        # ... and a "Apply" button (suggested-action style)
        apply_button = Gtk.Button(label="Apply")
        apply_button.connect("clicked", self.on_apply_clicked)
        apply_button.get_style_context().add_class(Gtk.STYLE_CLASS_SUGGESTED_ACTION)
        self.header.pack_end(apply_button)

    def show_all(self):
        """
        Show the window and start delaying settings writes, so changes can
        be committed with apply() or discarded with revert() later.
        """
        super(PreferencesDialog, self).show_all()
        self._settings.delay()

    def on_apply_clicked(self, *args):
        """
        The user has pressed the "Apply" button

        Validates all pages first: a PreferencesError aborts the apply
        (resetting the offending setting when one is identified), while a
        PreferencesWarning only shows a dialog and continues. On success
        the delayed settings are committed and the window is hidden.
        """
        logging.info("Applying changes in preferences")
        try:
            self.view.on_validate()
        except PreferencesError as e:
            show_error_dialog(
                msg="Preferences validation error", explanation=f"{e.message}"
            )
            if e.setting is not None:
                self._settings.reset(e.setting)
            return
        except PreferencesWarning as e:
            show_warning_dialog(msg="Preferences warning", explanation=f"{e.message}")
        try:
            self.view.on_apply()
        except Exception as e:
            show_error_dialog(msg="Saving error", explanation=f"{e}")
            return
        self._settings.apply()
        self.hide()

    def on_defaults_clicked(self, *args):
        """
        The user has pressed the "Defaults" button
        """
        logging.info("Resetting to default values")
        self.view.set_defaults()

    def on_cancel_clicked(self, *args):
        """
        The user has pressed the "Cancel" button
        """
        logging.info("Preferences changed canceled: reverting changes")
        self._settings.revert()
        self.hide()
# Things that could be configurable:
# TODO: keyboard shortcuts
# TODO: auto Helm charts
# TODO: auto deployed manifests (see https://rancher.com/docs/k3s/latest/en/advanced/)
# TODO: viewer for clusters (ie, Lens, chrome...)
class PreferencesPanedView(Gtk.Paned):
    """
    Two-pane preferences view: a Granite settings sidebar on the left and
    a Gtk.Stack holding the individual settings pages on the right.
    """

    def __init__(self, settings, docker):
        super().__init__()
        self.set_halign(Gtk.Align.FILL)
        self.set_valign(Gtk.Align.FILL)
        self.set_hexpand(True)
        self._settings = settings
        self._docker = docker
        # Every page receives the same settings/docker pair.
        page_kwargs = dict(settings=self._settings, docker=self._docker)
        self.general_preferences = GeneralSettingsPage(**page_kwargs)
        self.registry_preferences = RegistrySettingsPage(**page_kwargs)
        self.k3s_preferences = K3sSettingsPage(**page_kwargs)
        self.hooks_preferences = HooksSettingsPage(**page_kwargs)
        self.stack = Gtk.Stack()
        self.stack.set_halign(Gtk.Align.FILL)
        self.stack.set_valign(Gtk.Align.FILL)
        self.stack.set_hexpand(True)
        self.stack.set_homogeneous(True)
        # Insertion order determines the sidebar order.
        for page, stack_name in (
            (self.general_preferences, "settings_page"),
            (self.registry_preferences, "registry_page"),
            (self.k3s_preferences, "k3s_page"),
            (self.hooks_preferences, "hooks_page"),
        ):
            self.stack.add_named(page, stack_name)
        self.stack.set_transition_type(Gtk.StackTransitionType.CROSSFADE)
        self.stack.set_transition_duration(100)
        self.settings_sidebar = Granite.SettingsSidebar(stack=self.stack)
        self.add(self.settings_sidebar)
        self.add(self.stack)

    def _pages(self):
        # Single source of truth for the delegating methods below.
        return (
            self.general_preferences,
            self.registry_preferences,
            self.k3s_preferences,
            self.hooks_preferences,
        )

    def on_validate(self):
        """Validate every page; the first failure raises."""
        for page in self._pages():
            page.on_validate()

    def on_apply(self):
        """Apply the settings of every page."""
        for page in self._pages():
            page.on_apply()

    def set_defaults(self):
        """Reset every page to its default values."""
        for page in self._pages():
            page.set_defaults()
###############################################################################
# General settings
###############################################################################
class GeneralSettingsPage(SettingsPage):
    """General preferences: kubeconfig path, Docker endpoint, autostart."""

    # Settings keys owned (loaded/saved) by this page.
    _managed_settings = [
        SETTINGS_KEY_KUBECONFIG,
        SETTINGS_KEY_DOCKER_ENDPOINT,
        SETTINGS_KEY_START_ON_LOGIN,
    ]

    def __init__(self, **kwargs):
        self._docker = kwargs.pop("docker", None)
        super().__init__(
            activatable=False,
            description="General settings",
            header="General",
            icon_name="preferences-desktop",
            title="General",
            **kwargs,
        )
        # The Kubeconfig
        self.kubeconfig_entry = Gtk.Entry()
        self.kubeconfig_entry.props.hexpand = False
        self.kubeconfig_entry.props.halign = Gtk.Align.START
        self.kubeconfig_entry.set_tooltip_text(
            "The KUBECONFIG file. You should use something like "
            "~/.kube/config for having it automatically loaded by kubectl. "
            "It is important to note that this file WILL BE OVERWRITTEN. "
            "You should choose a different file if you are using some cloud providers "
            "or cluster created with some other tools."
        )
        self.append_labeled_entry(
            "Kubeconfig file:", self.kubeconfig_entry, SETTINGS_KEY_KUBECONFIG
        )
        # The docker entrypoint
        self.docker_endpoint_entry = Gtk.Entry()
        self.docker_endpoint_entry.props.hexpand = False
        self.docker_endpoint_entry.set_tooltip_text(
            "Docker endpoint, like unix:///var/run/docker.sock or "
            "tcp:192.168.1.10:1111"
        )
        self.append_labeled_entry(
            "Docker URL:", self.docker_endpoint_entry, SETTINGS_KEY_DOCKER_ENDPOINT
        )
        # Start on login
        self.start_login_checkbutton = Gtk.Switch()
        self.start_login_checkbutton.set_tooltip_text(
            f"When enabled, {APP_TITLE} is started on login"
        )
        self.append_labeled_entry(
            "Start on login:", self.start_login_checkbutton, SETTINGS_KEY_START_ON_LOGIN
        )

    def on_apply(self):
        """Create or remove the autostart entry to match the setting."""
        start_on_login = self._settings.get_boolean(SETTINGS_KEY_START_ON_LOGIN)
        startup = K3dvStartupEntry()
        if start_on_login:
            startup.create()
        else:
            startup.delete()

    def on_validate(self):
        """Check the configured Docker endpoint is a valid URL.

        Raises:
            PreferencesError: if the Docker endpoint does not validate.
        """
        dh = self._settings.get_safe_string(SETTINGS_KEY_DOCKER_ENDPOINT)
        if not is_valid_docker_host(dh):
            # fixed numbering: the second item was mislabeled "3)"
            raise PreferencesError(
                SETTINGS_KEY_DOCKER_ENDPOINT,
                f"'<tt>{dh}</tt>' is not a valid Docker URL. Please verify that\n\n"
                "1) Docker is installed and running\n"
                f"2) '<tt>{dh}</tt>' exists and is accessible\n"
                "3) the Docker URL in the <b><i>Preferences</i></b> is correct.",
            )
###############################################################################
# Advanced settings: common settings for the registry
###############################################################################
class RegistrySettingsPage(SettingsPage):
    """Advanced settings for the local Docker registry."""

    # Settings keys owned (loaded/saved) by this page.
    _managed_settings = [
        SETTINGS_KEY_REG_MODE,
        SETTINGS_KEY_REG_ADDRESS,
        SETTINGS_KEY_REG_VOL,
    ]

    def __init__(self, **kwargs):
        self._docker = kwargs.pop("docker", None)
        super().__init__(
            activatable=False,
            description="Local registry",
            header="Advanced settings",
            icon_name="folder-remote",
            title="Registry",
            **kwargs,
        )
        # Registry mode: regular registry or pull-through cache.
        registry_mode = Gtk.ListStore(str)
        registry_mode.append([SETTINGS_REG_LOCAL])
        registry_mode.append([SETTINGS_REG_CACHE])
        self.registry_mode = Gtk.ComboBox.new_with_model(registry_mode)
        renderer_text = Gtk.CellRendererText()
        self.registry_mode.pack_start(renderer_text, True)
        self.registry_mode.add_attribute(renderer_text, "text", 0)
        self.registry_mode.set_tooltip_text(
            "When configured as a pull-through cache, the local Docker registry will act "
            "as a local cache of all the images that are downloaded "
            "from the Docker Hub, but you cannot 'push' to this registry. "
        )
        self.append_labeled_entry(
            "Local registry mode:", self.registry_mode, SETTINGS_KEY_REG_MODE
        )
        # Registry hostname
        self.registry_name_entry = Gtk.Entry()
        self.registry_name_entry.hexpand = True
        self.append_labeled_entry(
            "Registry Name/Port:", self.registry_name_entry, SETTINGS_KEY_REG_ADDRESS
        )
        # Registry volume
        self.registry_volume_entry = Gtk.Entry()
        self.registry_volume_entry.hexpand = True
        self.registry_volume_entry.set_tooltip_text("Volume for saving the images.")
        self.append_labeled_entry(
            "Volume for images:", self.registry_volume_entry, SETTINGS_KEY_REG_VOL
        )

    def on_validate(self):
        """
        Validate the registry configuration

        Checks, in order: the registry address parses as ADDRESS:PORT,
        the volume name (when given) is a valid Docker name, and the
        registry host name resolves in DNS.

        Raises:
            PreferencesError: on the first check that fails.
        """
        logging.debug("[PREFERENCES] Validating registry can be parsed...")
        registry_address = self._settings.get_safe_string(SETTINGS_KEY_REG_ADDRESS)
        try:
            parsed_registry = parse_registry(registry_address)
            if parsed_registry is None:
                # no registry configured: nothing else to validate
                return
            registry_name, registry_port = parsed_registry
        except RegistryInvalidError as e:
            # chain the original parse error for easier debugging
            raise PreferencesError(
                SETTINGS_KEY_REG_ADDRESS,
                f"'<b><tt>{registry_address}</tt></b>' does not seem a valid registry."
                "\n\n"
                f"'{registry_address}' could not be parsed as a valid registry <tt>ADDRESS:PORT</tt>.",
            ) from e
        logging.debug("[PREFERENCES] Validating registry volume...")
        registry_volume = self._settings.get_safe_string(SETTINGS_KEY_REG_VOL)
        if registry_volume is not None and len(registry_volume) > 0:
            if not is_valid_docker_name(registry_volume):
                raise PreferencesError(
                    SETTINGS_KEY_REG_VOL,
                    f"'<b><tt>{registry_volume}</tt></b>' does not seem a valid volume name."
                    "\n\n"
                    f"'{registry_volume}' is not a valid Docker volume name.",
                )
        logging.debug(
            "[PREFERENCES] Validating the registry address can be resolved..."
        )
        try:
            socket.gethostbyname_ex(registry_name)
        except Exception as e:
            # chain the resolver error for easier debugging
            raise PreferencesError(
                SETTINGS_KEY_REG_ADDRESS,
                f"DNS name '<b><tt>{registry_name}</tt></b>' cannot be resolved."
                "\n\n"
                f"'{registry_name}' cannot be resolved as a valid DNS name. That usually means that either\n\n"
                f"1) your DNS server is not resolving this name, or"
                "\n\n"
                f"2) you must add a line like '<tt>127.0.0.1 {registry_name}</tt>' "
                f"in your '<tt>/etc/hosts</tt>' file.",
            ) from e
###############################################################################
# Advanced: K3s settings
###############################################################################
class K3sSettingsPage(SettingsPage):
    """Advanced settings for the k3s server run in the k3d nodes."""

    # Settings keys owned (loaded/saved) by this page.
    _managed_settings = [
        SETTINGS_KEY_K3D_IMAGE,
        SETTINGS_KEY_K3S_ARGS,
    ]

    def __init__(self, **kwargs):
        self._docker = kwargs.pop("docker", None)
        super().__init__(
            activatable=False,
            description="k3s server settings",
            icon_name="preferences-system",
            title="K3s",
            **kwargs,
        )
        # Fill a combo box with the official k3s images found in Docker Hub.
        images_store = Gtk.ListStore(str)
        logging.debug("Adding k3d images in the Docker Hub")
        images_store.append([""])  # empty Docker image
        for image in self._docker.get_official_k3s_images():
            try:
                name = image.attrs["RepoTags"][0]
                logging.debug(f"... image {name}")
            except Exception as e:
                # best effort: skip images with no usable RepoTags
                logging.debug(f"Could not grab information for image {image}: {e}")
            else:
                images_store.append([name])  # use `name` for id and... name
        self.k3d_image = Gtk.ComboBox.new_with_model_and_entry(images_store)
        renderer_text = Gtk.CellRendererText()
        self.k3d_image.pack_start(renderer_text, True)
        self.k3d_image.add_attribute(renderer_text, "text", 0)
        self.k3d_image.hexpand = True
        # fixed missing space between the two concatenated sentences
        self.k3d_image.set_tooltip_text(
            "When specified, will use an alternative Docker image for the k3d nodes. "
            "See the list of official k3s images at https://hub.docker.com/r/rancher/k3s/tags"
        )
        self.append_labeled_entry(
            "k3d docker image:", self.k3d_image, SETTINGS_KEY_K3D_IMAGE
        )
        # Extra arguments passed to the k3s server.
        self.k3s_args = Gtk.Entry()
        self.k3s_args.hexpand = True
        self.k3s_args.set_tooltip_text(
            "When specified, will add these extra arguments to the k3s server."
        )
        self.append_labeled_entry(
            "k3s server args:", self.k3s_args, SETTINGS_KEY_K3S_ARGS
        )
###############################################################################
# Advanced: hooks
###############################################################################
class HooksSettingsPage(SettingsPage):
_managed_settings = [
SETTINGS_KEY_CREATE_HOOK,
SETTINGS_KEY_DESTROY_HOOK,
]
def __init__(self, **kwargs):
self._docker = kwargs.pop("docker", None)
super().__init__(
activatable=False,
description="Scripts to run at some moments",
icon_name="folder",
title="Scripts",
**kwargs,
)
self.cluster_create_hook = Gtk.Entry()
self.cluster_create_hook.props.hexpand = False
self.cluster_create_hook.set_tooltip_text(
"A script that will be run right after creating a new cluster"
)
self.cluster_create_hook.set_icon_from_icon_name(
Gtk.EntryIconPosition.SECONDARY, "folder-open"
)
self.append_labeled_entry(
"After-creation script:", self.cluster_create_hook, SETTINGS_KEY_CREATE_HOOK
)
self.cluster_destroy_hook = Gtk.Entry()
self.cluster_destroy_hook.props.hexpand = False
self.cluster_destroy_hook.set_tooltip_text(
"A script that will be run right | |
import pickle
import argparse
import os
import csv
import multiprocessing
import random
from time import sleep
import pandas as pd
import yaml
import skopt
import ray
from ray.tune import run
from ray.tune.suggest.hyperopt import HyperOptSearch
from ray.tune.suggest.skopt import SkOptSearch
from ray.tune.automl import GeneticSearch
import hyperopt as hpo
import ctlearn_optimizer.bayesian_tpe as bayesian_tpe
import ctlearn_optimizer.bayesian_gp as bayesian_gp
import ctlearn_optimizer.genetic_algorithm as genetic_algorithm
import ctlearn_optimizer.common as common
# Set a fixed dummy authentication key for multiprocessing so that the
# Manager proxies created in Optimizer.__init__ can be shared with the
# separate processes spawned by Ray Tune (both ends must use the same
# authkey). NOTE(review): a hard-coded key is not secure — acceptable only
# for local, trusted runs.
multiprocessing.current_process().authkey = b'1234'
class Optimizer:
""" Basic class for an optimizer.
Currently, only tree parzen estimators, random search, gaussian processes
and genetic algorithm based optimization using Ray Tune is supported.
"""
def __init__(self, opt_config):
""" Initialize the optimizer:
- Set optimizer attributes.
- Set logger writing to both ``optimization.log`` and ``stdout``.
- Load trials file from ``working_directory/trials.pkl``
if required, thus allowing to resume a past optimization run.
- Load optimization results file from
``working_directory/optimization_results.csv`` or create one at
the same path as required. The results of the optimization run
(loss, iteration, metrics, hyperparameters, time) are logged to
this file for further analysis.
Parameters:
opt_config (dict): loaded optimization configuration file.
Raises:
NotImplementedError: if ``self.optimization_type`` is
``genetic_algorithm`` and ``self.reload_trials`` is ``True``.
"""
# set working directory path
gen_settings = opt_config['General_settings']
self.working_directory = gen_settings.get('working_directory',
os.getcwd())
# create new logger
self.log_path = os.path.join(self.working_directory,
'optimization.log')
self.logger = common.set_logger(self.log_path)
self.logger.info('Starting optimization run')
# ray tune optimizator runs the objective function on a different
# Python process, so we have to use multiprocessing manager to share
# variables between processes.
manager = multiprocessing.Manager()
# set global run iteration (for taking into account reloaded trials)
self.iteration = manager.Value('i', 0)
# set current run iteration (always starts from 0)
self.counter = manager.Value('i', 0)
# set random state seed for reproducible results
self.random_state = gen_settings.get('random_state', None)
# set hardware resources
self.num_cpus = gen_settings['num_cpus']
self.num_cpus_per_trial = gen_settings['num_cpus_per_trial']
self.num_gpus = gen_settings['num_gpus']
self.num_gpus_per_trial = gen_settings['num_gpus_per_trial']
# set file paths
self.ctlearn_config_path = os.path.join(self.working_directory,
gen_settings['ctlearn_config'])
self.trials_file_path = os.path.join(self.working_directory,
'trials.pkl')
self.optim_results_path = os.path.join(self.working_directory,
'optimization_results.csv')
self.remove_training_folders = gen_settings.get(
'remove_training_folders', True)
# set optimizer configuration
self.num_parallel_trials = gen_settings.get('num_parallel_trials', 1)
self.mode = gen_settings.get('mode', 'max')
self.n_initial_points = gen_settings.get('n_initial_points', 30)
self.optimization_type = gen_settings['optimization_type']
self.num_max_evals = gen_settings['num_max_evals']
self.logger.info('Optimization algorithm:{}'.format(
self.optimization_type))
self.metric_to_optimize = gen_settings['metric_to_optimize']
self.predict = gen_settings.get('predict', False)
self.data_set_to_optimize = gen_settings.get('data_set_to_optimize',
'validation')
if self.data_set_to_optimize == 'prediction':
assert self.predict is True
if self.optimization_type == 'genetic_algorithm':
optimizer_info = opt_config['Optimizer_settings']
self.ga_config = optimizer_info['genetic_algorithm_config']
else:
optimizer_info = opt_config.get('Optimizer_settings', None)
self.ga_config = optimizer_info.get('genetic_algorithm_config',
None)
self.tpe_config = optimizer_info.get('tree_parzen_estimators_config',
None)
self.gp_config = optimizer_info.get('gaussian_processes_config', None)
# set ctlearn basic configuration
self.basic_config = opt_config['CTLearn_settings']
# set hyperparameters logging and configuration
hyperparameters_info = opt_config['Hyperparameters_settings']
self.hyperparams_to_log = (hyperparameters_info
['hyperparameters_to_log'])
self.hyperparameters_config = (hyperparameters_info['config'])
self.hyperparams_to_optimize = (hyperparameters_info
['hyperparameters_to_optimize'])
self.fixed_hyperparameters = hyperparameters_info.get(
'fixed_hyperparameters', None)
self.dependent_hyperparameters = hyperparameters_info.get(
'dependent_hyperparameters', None)
# set metrics logging and configuration
self.list_metrics_val_to_log = gen_settings.get('metrics_val_to_log',
[])
self.list_metrics_pred_to_log = gen_settings.get('metrics_pred_to_log',
[])
if self.list_metrics_pred_to_log:
assert self.predict is True
else:
assert self.data_set_to_optimize != 'prediction'
self.user_defined_metric_val = gen_settings.get(
'user_defined_metric_val', None)
self.user_defined_metric_pred = gen_settings.get(
'user_defined_metric_pred', None)
# create hyperparameters space, used for trial reloading
hyperparameter_space = self.create_space_hyperparams()
# set trial reloading configuration and create optimization algorithm
self.reload_trials = gen_settings.get('reload_trials', False)
if self.reload_trials:
if self.optimization_type == 'genetic_algorithm':
raise NotImplementedError('Trial reloading is not currently \
supported by the genetic algorithm optimization')
else:
# reload trials file
assert os.path.isfile(self.trials_file_path)
with open(self.trials_file_path, 'rb') as input_file:
trials = pickle.load(input_file)
if self.optimization_type == 'gaussian_processes':
# the gaussian processes based optimization needs an
# extra parameter self.gp_opt
self.gp_opt = common.restore(self)
# create optimization algorithm with reloaded trials
self.optimization_algorithm = \
self.create_optimization_algorithm(
hyperparameter_space)
# set global run iteration value to start from the
# number of trials of the reloaded run
self.iteration.value = len(trials.Xi)
else: # optimization type == tpe or random_search
# create optimization algorithm with reloaded trials
self.optimization_algorithm = \
self.create_optimization_algorithm(
hyperparameter_space)
common.restore(self)
if self.optimization_type == 'random_search':
self.optimization_algorithm.algo = hpo.rand.suggest
# set global run iteration value to start from the
# number of trials of the reloaded run
self.iteration.value = len(trials[0].trials)
self.logger.info(
'A trials file with {} saved trials has been reloaded, \
new trials will be added'.format(self.iteration.value))
else:
self.logger.info('No trials file loaded, starting from scratch')
self.gp_opt = None
# create optimization algorithm
self.optimization_algorithm = self.create_optimization_algorithm(
hyperparameter_space)
self.iteration.value = 0
# set optimization results configuration
reload_optimization_results = gen_settings.get(
'reload_optimization_results', False)
if reload_optimization_results:
# reload optimization_results file
assert os.path.isfile(self.optim_results_path)
with open(self.optim_results_path, 'r') as file:
existing_iters_csv = len(file.readlines()) - 1
self.logger.info(
'An optimization_results file with {} saved trials has been \
reloaded, new trials will be added'.format(existing_iters_csv))
if existing_iters_csv != self.iteration.value:
self.logger.WARNING(
'Caution: the number of trials stored in the trials file \
and the number of trials stored in the \
optimization_results file does not match')
else: # create optimization_results.csv
self.logger.info(
'No optimization_results file loaded, starting from scratch')
with open(self.optim_results_path, 'w') as file:
writer = csv.writer(file)
header = ['loss', 'iteration'] + self.hyperparams_to_log + \
[elem + '_val' for elem in self.list_metrics_val_to_log] + \
[elem + '_pred' for elem in self.list_metrics_pred_to_log] + \
['run_time']
writer.writerow(header)
    def set_basic_config(self):
        """Set basic config and fixed hyperparameters in CTLearn config file.

        Thin wrapper delegating to ``common.set_basic_config``; this
        optimizer instance is passed so the helper can read the
        configuration attributes set in ``__init__`` (presumably
        ``self.basic_config``, ``self.fixed_hyperparameters`` and
        ``self.ctlearn_config_path`` — see ``common.set_basic_config``).
        """
        common.set_basic_config(self)
def create_space_hyperparams(self):
""" Create space of hyperparameters following required syntax.
Currently, only tree parzen estimators and random search spaces based
on hyperopt, gaussian processes space based on skopt and
genetic algorithm space based on ray.tune.automl are supported.
Returns:
space of hyperparameters following the syntax required by the
optimization algorithm.
Raises:
NotImplementedError: if ``self.optimization_type`` is other than
``tree_parzen_estimators``, ``random_search``,
``gaussian_processes`` or ``genetic_algorithm``.
"""
hyper_to_opt = self.hyperparams_to_optimize
if self.optimization_type == 'tree_parzen_estimators':
hyperparameter_space = bayesian_tpe.hyperopt_space(hyper_to_opt)
elif self.optimization_type == 'random_search':
hyperparameter_space = bayesian_tpe.hyperopt_space(hyper_to_opt)
elif self.optimization_type == 'gaussian_processes':
hyperparameter_space = bayesian_gp.skopt_space(hyper_to_opt)
elif self.optimization_type == 'genetic_algorithm':
hyperparameter_space = genetic_algorithm.gen_al_space(hyper_to_opt)
else:
raise NotImplementedError(
'Other optimization types are not supported yet')
return hyperparameter_space
def create_optimization_algorithm(self, hyperparameter_space):
""" Create optimization algorithm for Ray Tune.
Currently, only tree parzen estimators, random search,
gaussian processes and genetic algorithm based optimization using Ray
Tune is supported.
Parameters:
space (dict, list or ray.tune.automl.search_space.SearchSpace):
space of hyperparameters following the syntax required by
the optimization algorithm.
Returns:
Optimization algorithm for Ray Tune.
Raises:
NotImplementedError: if self.optimization_type is other than
``tree_parzen_estimators``, ``random_search``,
``gaussian_processes`` or ``genetic_algorithm``.
"""
if self.optimization_type == 'tree_parzen_estimators':
algorithm = HyperOptSearch(
hyperparameter_space,
max_concurrent=self.num_parallel_trials,
metric='loss',
mode=self.mode,
n_initial_points=self. n_initial_points,
random_state_seed=self.random_state,
gamma=self.tpe_config.get('gamma', 0.25))
elif self.optimization_type == 'random_search':
algorithm = HyperOptSearch(
hyperparameter_space,
max_concurrent=self.num_parallel_trials,
metric='loss',
mode=self.mode,
random_state_seed=self.random_state)
algorithm.algo = hpo.rand.suggest
elif self.optimization_type == 'gaussian_processes':
if not self.reload_trials:
# the gaussian processes based optimization needs an
# extra parameter self.gp_opt
self.gp_opt = skopt.Optimizer(
hyperparameter_space,
n_initial_points=self.n_initial_points,
base_estimator=self.gp_config.get(
'base_estimator', 'GP'),
acq_func=self.gp_config.get(
'acq_function', 'gp_hedge'),
acq_optimizer=self.gp_config.get(
'acq_optimizer', 'auto'),
random_state=self.random_state,
acq_func_kwargs={'xi': self.gp_config.get('xi',
0.01),
'kappa': self.gp_config.get('kappa',
1.96)})
hyperparams_names = [key for key in self.hyperparams_to_optimize]
algorithm = SkOptSearch(self.gp_opt,
hyperparams_names,
max_concurrent=self.num_parallel_trials,
metric='loss',
mode=self.mode)
elif self.optimization_type == 'genetic_algorithm':
algorithm = GeneticSearch(
hyperparameter_space,
reward_attr='loss',
max_generation=self.ga_config['max_generation'],
population_size=self.ga_config['population_size'],
population_decay=self.ga_config.get('population_decay', 0.95),
keep_top_ratio=self.ga_config.get('keep_top_ratio', 0.2),
selection_bound=self.ga_config.get('selection_bound', 0.4),
crossover_bound=self.ga_config.get('crossover_bound', 0.4))
else:
raise NotImplementedError(
'Other optimization types are not supported yet')
return algorithm
def get_ctlearn_metric_to_optimize(self, hyperparams):
""" Evaluate a CTLearn model and return metric to optimize.
Parameters:
hyperparams (dict): set of hyperparameters to evaluate provided by
the optimizer.
Returns:
float: metric to optimize.
"""
# if more than one CTLearn model is going to be evaluated at the same
# time, delay the execution of each model for a small random number of
# seconds in order to avoid the simultaneus creation of CTLearn working
# folders that have the same name, which would lead to errors
if self.num_parallel_trials > 1:
sleep(random.randint(1, 20))
# evaluate a CTLearn model and return metric
metric = common.ctlearn_objective(self, hyperparams)
if self.optimization_type == 'genetic_algorithm':
# the genetic algorithm internally maximizes the loss, so:
if self.mode == 'min':
metric *= -1
return metric
def optimize(self, objective_function):
""" Start the optimization of ``objective_function`` using Ray Tune.
Currently, only tree parzen estimators, random search,
gaussian processes and genetic algorithm based optimization using Ray
Tune is supported.
Parameters:
objective_function: function to optimize following the syntax:
.. code:: python
def(hyperparams, reporter):
...
# compute loss to optimize
...
reporter(loss=loss)
Returns:
ExperimentAnalysis: object used for analyzing results from Tune
``run()``.
"""
# start a Ray cluster with given resources and connect to it
ray.init(num_cpus=self.num_cpus,
num_gpus=self.num_gpus)
# set CTLearn basic config and fixed hyperparameters
self.set_basic_config()
# create custom trial names |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.