content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
from __future__ import print_function, division
import logging
import numpy as np
from . import operators
from . import utils
from . import algorithms
def delta_data(A, S, Y, W=1):
    """Return the (optionally weighted) model residual W * (A S - Y)."""
    residual = A.dot(S) - Y
    return W * residual
def grad_likelihood_A(A, S, Y, W=1):
    """Gradient of the quadratic likelihood ||W^(1/2)(AS - Y)||^2 w.r.t. A."""
    # The weighted residual W*(AS - Y), projected back through S.
    return (W * (A.dot(S) - Y)).dot(S.T)
def grad_likelihood_S(S, A, Y, W=1):
    """Gradient of the quadratic likelihood ||W^(1/2)(AS - Y)||^2 w.r.t. S."""
    # The weighted residual W*(AS - Y), projected back through A.
    return A.T.dot(W * (A.dot(S) - Y))
# executes one proximal step of likelihood gradient, followed by prox_g
def prox_likelihood_A(A, step, S=None, Y=None, prox_g=None, W=1):
    """One gradient step on the likelihood w.r.t. A, then apply prox_g."""
    gradient = (W * (A.dot(S) - Y)).dot(S.T)
    return prox_g(A - step * gradient, step)
def prox_likelihood_S(S, step, A=None, Y=None, prox_g=None, W=1):
    """One gradient step on the likelihood w.r.t. S, then apply prox_g."""
    gradient = A.T.dot(W * (A.dot(S) - Y))
    return prox_g(S - step * gradient, step)
def prox_likelihood(X, step, Xs=None, j=None, Y=None, WA=None, WS=None, prox_S=operators.prox_id, prox_A=operators.prox_id):
    """Dispatch a proximal likelihood step to the A factor (j == 0) or the S factor.

    Xs holds the full factor list [A, S]; X is the factor being updated.
    """
    if j == 0:
        return prox_likelihood_A(X, step, S=Xs[1], Y=Y, prox_g=prox_A, W=WA)
    return prox_likelihood_S(X, step, A=Xs[0], Y=Y, prox_g=prox_S, W=WS)
class Steps_AS:
    """Callable that yields inverse Lipschitz constants (step sizes) for the A and S updates."""

    def __init__(self, WA=1, WS=1, slack=0.1, max_stride=100):
        """Helper class to compute the Lipschitz constants of grad f.

        The __call__ function computes the spectral norms of A or S, which
        determine the Lipschitz constant of the respective update steps.

        If a weight matrix is used, the stepsize will be upper bounded by
        assuming the maximum value of the weights. In the case of varying
        weights, it is generally advised to normalize the weight matrix
        differently for the A and S updates, therefore two weight matrices
        (WA, WS) can be set.

        Because the spectral norm is expensive to compute, it will only update
        the step_size if relative changes of L exceed slack/2.
        If not, which is usually the case after only a few iterations, it will
        report a previous value for the next several iterations. The stride
        between updates is set by
            stride -> stride * (slack/2) / rel_error
        i.e. it increases more strongly if the rel_error is much below the
        slack budget.

        Args:
            WA: weight matrix for the A update; the scalar 1 means "no weights"
            WS: weight matrix for the S update; the scalar 1 means "no weights"
            slack: relative tolerance before the Lipschitz constant is recomputed
            max_stride: maximum number of iterations between recomputations
        """
        import scipy.sparse
        # NOTE: the original code used `WA is 1`, which relies on CPython's
        # small-int caching and emits a SyntaxWarning on Python >= 3.8.
        # A scalar marks the identity weighting; anything else is flattened
        # into a sparse diagonal so it can enter the eigenproblems below.
        if np.isscalar(WA):
            self.WA = WA
        else:
            self.WA = scipy.sparse.diags(WA.reshape(-1))
        if np.isscalar(WS):
            self.WS = WS
        else:
            self.WS = scipy.sparse.diags(WS.reshape(-1))
        # two independent caches for the Lipschitz constants of A and S
        self._cb = [utils.ApproximateCache(self._one_over_lipschitzA, slack=slack, max_stride=max_stride),
                    utils.ApproximateCache(self._one_over_lipschitzS, slack=slack, max_stride=max_stride)]

    def _one_over_lipschitzA(self, Xs):
        """Inverse Lipschitz constant of grad_A at the current (A, S)."""
        import scipy.sparse
        A, S = Xs
        if not scipy.sparse.issparse(self.WA):  # unweighted: plain spectral norm of S
            return 1. / utils.get_spectral_norm(S.T)
        else:  # full weight matrix, need to serialize S along k
            # scipy.sparse.linalg is a subpackage and must be imported explicitly;
            # `import scipy.sparse` alone does not guarantee it is available.
            import scipy.sparse.linalg
            Ss = scipy.sparse.block_diag([S.T for b in range(len(A))])
            # Lipschitz constant for grad_A = || S Sigma_1 S.T ||_s
            SSigma_1S = Ss.T.dot(self.WA.dot(Ss))
            LA = np.real(scipy.sparse.linalg.eigs(SSigma_1S, k=1, return_eigenvectors=False)[0])
            return 1. / LA

    def _one_over_lipschitzS(self, Xs):
        """Inverse Lipschitz constant of grad_S at the current (A, S)."""
        import scipy.sparse
        A, S = Xs
        # BUG FIX: the original tested `self.WA` here, so a caller providing
        # only WS (with WA left at 1) silently ignored the S-update weights.
        if not scipy.sparse.issparse(self.WS):  # unweighted: plain spectral norm of A
            return 1. / utils.get_spectral_norm(A)
        else:  # full weight matrix, need to serialize A along k
            import scipy.sparse.linalg
            N = S.shape[1]
            As = scipy.sparse.bmat([[scipy.sparse.identity(N) * A[b, k] for k in range(A.shape[1])] for b in range(A.shape[0])])
            # Lipschitz constant for grad_S = || A.T Sigma_1 A ||_s
            ASigma_1A = As.T.dot(self.WS.dot(As))
            LS = np.real(scipy.sparse.linalg.eigs(ASigma_1A, k=1, return_eigenvectors=False)[0])
            return 1. / LS

    def __call__(self, j, Xs):
        """Return the (cached) step size for factor j: 0 -> A, 1 -> S."""
        return self._cb[j](Xs)
def normalizeMatrix(M, axis):
    """Return an array of M's shape holding the sums of M along `axis`,
    broadcast so every element carries its row (axis=1) or column (axis=0) total.
    """
    totals = M.sum(axis=axis)
    if axis == 1:
        # Row sums: broadcast along columns by going through the transpose.
        return np.broadcast_to(totals, M.T.shape).T
    # Column sums broadcast directly along rows.
    return np.broadcast_to(totals, M.shape)
def nmf(Y, A, S, W=None, prox_A=operators.prox_plus, prox_S=operators.prox_plus, proxs_g=None, steps_g=None, Ls=None, slack=0.9, update_order=None, steps_g_update='steps_f', max_iter=1000, e_rel=1e-3, e_abs=0, traceback=None):
    """Non-negative matrix factorization.

    Solves the NMF problem

        minimize || Y - AS ||_2^2

    under an arbitrary number of constraints on A and/or S.

    Args:
        Y: target matrix MxN
        A: initial amplitude matrix MxK, will be updated
        S: initial source matrix KxN, will be updated
        W: (optional weight matrix MxN)
        prox_A: direct projection constraint of A
        prox_S: direct projection constraint of S
        proxs_g: list of constraints for A or S for ADMM-type optimization
            [[prox_A_0, prox_A_1...],[prox_S_0, prox_S_1,...]]
        steps_g: specific value of step size for proxs_g (experts only!)
        Ls: list of linear operators for the constraint functions proxs_g
            If set, needs to have same format as proxs_g.
            Matrices can be numpy.array, scipy.sparse, or None (for identity).
        slack: tolerance for (re)evaluation of Lipschitz constants
            See Steps_AS() for details.
        update_order: list of factor indices in update order
            j=0 -> A, j=1 -> S
        max_iter: maximum iteration number, irrespective of current residuals
        e_rel: relative error threshold for primal and dual residuals
        e_abs: absolute error threshold for primal and dual residuals
        traceback: utils.Traceback to hold variable histories

    Returns:
        converged: convergence test for A,S
        errors: difference between latest and previous iterations for A,S

    See also:
        algorithms.bsdmm for update_order and steps_g_update
        utils.AcceleratedProxF for Nesterov acceleration

    Reference:
        Moolekamp & Melchior, 2017 (arXiv:1708.09066)
    """
    from functools import partial

    # Step-size callback needs the weights; normalize W along pixels and bands
    # so that the A and S factors update at comparable speeds.
    if W is None:
        WA = WS = 1
    else:
        WA = normalizeMatrix(W, 1)
        WS = normalizeMatrix(W, 0)
    steps_f = Steps_AS(WA=WA, WS=WS, slack=slack)

    # Likelihood gradient step followed by the direct prox of each factor.
    f = partial(prox_likelihood, Y=Y, WA=WA, WS=WS, prox_S=prox_S, prox_A=prox_A)
    X = [A, S]

    # With constraint functions present, run the block-SDMM solver;
    # otherwise the accelerated block-PGM suffices.
    if proxs_g is not None and utils.hasNotNone(proxs_g):
        return algorithms.bsdmm(X, f, steps_f, proxs_g, steps_g=steps_g, Ls=Ls, update_order=update_order, steps_g_update=steps_g_update, max_iter=max_iter, e_rel=e_rel, e_abs=e_abs, traceback=traceback)
    return algorithms.bpgm(X, f, steps_f, accelerated=True, update_order=update_order, max_iter=max_iter, e_rel=e_rel, traceback=traceback)
| proxmin/nmf.py | 6,974 | Helper class to compute the Lipschitz constants of grad f.
The __call__ function computes the spectral norms of A or S, which
determine the Lipschitz constant of the respective update steps.
If a weight matrix is used, the stepsize will be upper bounded by
assuming the maximum value of the weights. In the case of varying
weights, it is generally advised to normalize the weight matrix
differently for the A and S updates, therefore two maximum numbers
(WAMax, WSmax) can be set.
Because the spectral norm is expensive to compute, it will only update
the step_size if relative changes of L exceed slack/2.
If not, which is usually the case after only a few iterations, it will
report a previous value for the next several iterations. The stride
between updates is set by
stride -> stride * (slack/2) / rel_error
i.e. it increases more strongly if the rel_error is much below the
slack budget.
Non-negative matrix factorization.
This method solves the NMF problem
minimize || Y - AS ||_2^2
under an arbitrary number of constraints on A and/or S.
Args:
Y: target matrix MxN
A: initial amplitude matrix MxK, will be updated
S: initial source matrix KxN, will be updated
W: (optional weight matrix MxN)
prox_A: direct projection constraint of A
prox_S: direct projection constraint of S
proxs_g: list of constraints for A or S for ADMM-type optimization
[[prox_A_0, prox_A_1...],[prox_S_0, prox_S_1,...]]
steps_g: specific value of step size for proxs_g (experts only!)
Ls: list of linear operators for the constraint functions proxs_g
If set, needs to have same format as proxs_g.
Matrices can be numpy.array, scipy.sparse, or None (for identity).
slack: tolerance for (re)evaluation of Lipschitz constants
See Steps_AS() for details.
update_order: list of factor indices in update order
j=0 -> A, j=1 -> S
max_iter: maximum iteration number, irrespective of current residuals
e_rel: relative error threshold for primal and dual residuals
e_abs: absolute error threshold for primal and dual residuals
traceback: utils.Traceback to hold variable histories
Returns:
converged: convergence test for A,S
errors: difference between latest and previous iterations for A,S
See also:
algorithms.bsdmm for update_order and steps_g_update
utils.AcceleratedProxF for Nesterov acceleration
Reference:
Moolekamp & Melchior, 2017 (arXiv:1708.09066)
executes one proximal step of likelihood gradient, followed by prox_g two independent caches for Lipschitz constants full weight matrix, need to serialize S along k Lipschitz constant for grad_A = || S Sigma_1 S.T||_s create stepsize callback, needs max of W normalize in pixel and band directions to have similar update speeds gradient step, followed by direct application of prox_S or prox_A use accelerated block-PGM if there's no proxs_g | 2,907 | en | 0.791016 |
import json
import logging
import math
import os
import random
import warnings
from dataclasses import asdict
from multiprocessing import Pool, cpu_count
from pathlib import Path
import numpy as np
import pandas as pd
import torch
from tensorboardX import SummaryWriter
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm.auto import tqdm, trange
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoTokenizer,
BartConfig,
BartForConditionalGeneration,
BartTokenizer,
BertConfig,
BertForMaskedLM,
BertModel,
BertTokenizer,
CamembertConfig,
CamembertModel,
CamembertTokenizer,
DistilBertConfig,
DistilBertModel,
DistilBertTokenizer,
ElectraConfig,
ElectraModel,
ElectraTokenizer,
EncoderDecoderConfig,
EncoderDecoderModel,
LongformerConfig,
LongformerModel,
LongformerTokenizer,
MarianConfig,
MarianMTModel,
MarianTokenizer,
MobileBertConfig,
MobileBertModel,
MobileBertTokenizer,
PreTrainedModel,
PreTrainedTokenizer,
RobertaConfig,
RobertaModel,
RobertaTokenizer,
get_linear_schedule_with_warmup,
)
from simpletransformers.config.global_args import global_args
from simpletransformers.config.model_args import Seq2SeqArgs
from simpletransformers.seq2seq.seq2seq_utils import Seq2SeqDataset, SimpleSummarizationDataset
# wandb is an optional dependency: experiment logging is enabled only when
# the package can be imported, and training falls back gracefully without it.
try:
    import wandb

    wandb_available = True
except ImportError:
    wandb_available = False

logger = logging.getLogger(__name__)

# Maps a model-type string (as passed to Seq2SeqModel) to its
# (config class, model class, tokenizer class) triple.
MODEL_CLASSES = {
    "auto": (AutoConfig, AutoModel, AutoTokenizer),
    "bart": (BartConfig, BartForConditionalGeneration, BartTokenizer),
    "bert": (BertConfig, BertModel, BertTokenizer),
    "camembert": (CamembertConfig, CamembertModel, CamembertTokenizer),
    "distilbert": (DistilBertConfig, DistilBertModel, DistilBertTokenizer),
    "electra": (ElectraConfig, ElectraModel, ElectraTokenizer),
    "longformer": (LongformerConfig, LongformerModel, LongformerTokenizer),
    "mobilebert": (MobileBertConfig, MobileBertModel, MobileBertTokenizer),
    "marian": (MarianConfig, MarianMTModel, MarianTokenizer),
    "roberta": (RobertaConfig, RobertaModel, RobertaTokenizer),
}
class Seq2SeqModel:
    def __init__(
        self,
        encoder_type=None,
        encoder_name=None,
        decoder_name=None,
        encoder_decoder_type=None,
        encoder_decoder_name=None,
        config=None,
        args=None,
        use_cuda=True,
        cuda_device=-1,
        **kwargs,
    ):
        """
        Initializes a Seq2SeqModel.

        Args:
            encoder_type (optional): The type of model to use as the encoder.
            encoder_name (optional): The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.
            decoder_name (optional): The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.
                Must be the same "size" as the encoder model (base/base, large/large, etc.)
            encoder_decoder_type (optional): The type of encoder-decoder model. (E.g. bart)
            encoder_decoder_name (optional): The path to a directory containing the saved encoder and decoder of a Seq2SeqModel. (E.g. "outputs/") OR a valid BART or MarianMT model.
            config (optional): A configuration file to build an EncoderDecoderModel.
            args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.
            use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.
            cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.
            **kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.
        """  # noqa: ignore flake8"

        # Validate the argument combination: either a config object, or names
        # that identify the encoder/decoder weights, must be supplied.
        if not config:
            # if not ((encoder_name and decoder_name) or encoder_decoder_name) and not encoder_type:
            if not ((encoder_name and decoder_name) or encoder_decoder_name):
                raise ValueError(
                    "You must specify a Seq2Seq config \t OR \t"
                    "encoder_type, encoder_name, and decoder_name OR \t \t"
                    "encoder_type and encoder_decoder_name"
                )
            elif not (encoder_type or encoder_decoder_type):
                raise ValueError(
                    "You must specify a Seq2Seq config \t OR \t"
                    "encoder_type, encoder_name, and decoder_name \t OR \t"
                    "encoder_type and encoder_decoder_name"
                )

        # Load saved args (if encoder_decoder_name is a saved-model dir), then
        # layer caller-provided overrides on top.
        self.args = self._load_model_args(encoder_decoder_name)

        if isinstance(args, dict):
            self.args.update_from_dict(args)
        elif isinstance(args, Seq2SeqArgs):
            self.args = args

        # wandb sweep support: values from the sweep config override args.
        if "sweep_config" in kwargs:
            sweep_config = kwargs.pop("sweep_config")
            sweep_values = {key: value["value"] for key, value in sweep_config.as_dict().items() if key != "_wandb"}
            self.args.update_from_dict(sweep_values)

        # Seed all RNGs for reproducibility when a manual seed is configured.
        if self.args.manual_seed:
            random.seed(self.args.manual_seed)
            np.random.seed(self.args.manual_seed)
            torch.manual_seed(self.args.manual_seed)
            if self.args.n_gpu > 0:
                torch.cuda.manual_seed_all(self.args.manual_seed)

        # Resolve the compute device; fail loudly if CUDA was requested but is absent.
        if use_cuda:
            if torch.cuda.is_available():
                if cuda_device == -1:
                    self.device = torch.device("cuda")
                else:
                    self.device = torch.device(f"cuda:{cuda_device}")
            else:
                raise ValueError(
                    "'use_cuda' set to True when cuda is unavailable."
                    "Make sure CUDA is available or set `use_cuda=False`."
                )
        else:
            self.device = "cpu"

        self.results = {}

        # fp16 requires CUDA in this implementation.
        if not use_cuda:
            self.args.fp16 = False

        # config = EncoderDecoderConfig.from_encoder_decoder_configs(config, config)
        if encoder_decoder_type:
            config_class, model_class, tokenizer_class = MODEL_CLASSES[encoder_decoder_type]
        else:
            config_class, model_class, tokenizer_class = MODEL_CLASSES[encoder_type]

        # Single-model seq2seq architectures (BART/Marian) load as one unit and
        # share one tokenizer for encoder and decoder.
        if encoder_decoder_type in ["bart", "marian"]:
            self.model = model_class.from_pretrained(encoder_decoder_name)
            if encoder_decoder_type == "bart":
                self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_decoder_name)
            elif encoder_decoder_type == "marian":
                # Saved Marian models need the base model name to recover the tokenizer.
                if self.args.base_marian_model_name:
                    self.encoder_tokenizer = tokenizer_class.from_pretrained(self.args.base_marian_model_name)
                else:
                    self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_decoder_name)
            self.decoder_tokenizer = self.encoder_tokenizer
            self.config = self.model.config
        else:
            # Composite encoder-decoder: separate encoder and (BERT) decoder parts.
            if encoder_decoder_name:
                # Loading a previously saved Seq2SeqModel directory with
                # "encoder" and "decoder" subdirectories.
                # self.model = EncoderDecoderModel.from_pretrained(encoder_decoder_name)
                self.model = EncoderDecoderModel.from_encoder_decoder_pretrained(
                    os.path.join(encoder_decoder_name, "encoder"), os.path.join(encoder_decoder_name, "decoder")
                )
                self.model.encoder = model_class.from_pretrained(os.path.join(encoder_decoder_name, "encoder"))
                self.model.decoder = BertForMaskedLM.from_pretrained(os.path.join(encoder_decoder_name, "decoder"))
                self.encoder_tokenizer = tokenizer_class.from_pretrained(os.path.join(encoder_decoder_name, "encoder"))
                self.decoder_tokenizer = BertTokenizer.from_pretrained(os.path.join(encoder_decoder_name, "decoder"))
            else:
                # Building a fresh encoder-decoder from two pre-trained checkpoints.
                self.model = EncoderDecoderModel.from_encoder_decoder_pretrained(
                    encoder_name, decoder_name, config=config
                )
                self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_name)
                self.decoder_tokenizer = BertTokenizer.from_pretrained(decoder_name)
            self.encoder_config = self.model.config.encoder
            self.decoder_config = self.model.config.decoder

        # Disable wandb logging when the package is missing.
        if self.args.wandb_project and not wandb_available:
            warnings.warn("wandb_project specified but wandb is not available. Wandb disabled.")
            self.args.wandb_project = None

        # Record a human-readable model name for logging/checkpoint paths.
        if encoder_decoder_name:
            self.args.model_name = encoder_decoder_name

            # # Checking if we are loading from a saved model or using a pre-trained model
            # if not saved_model_args and encoder_decoder_type == "marian":
            # Need to store base pre-trained model name to get the tokenizer when loading a saved model
            self.args.base_marian_model_name = encoder_decoder_name
        elif encoder_name and decoder_name:
            self.args.model_name = encoder_name + "-" + decoder_name
        else:
            self.args.model_name = "encoder-decoder"

        if encoder_decoder_type:
            self.args.model_type = encoder_decoder_type
        elif encoder_type:
            self.args.model_type = encoder_type + "-bert"
        else:
            self.args.model_type = "encoder-decoder"
def train_model(
self, train_data, output_dir=None, show_running_loss=True, args=None, eval_data=None, verbose=True, **kwargs,
):
"""
Trains the model using 'train_data'
Args:
train_data: Pandas DataFrame containing the 2 columns - `input_text`, `target_text`.
- `input_text`: The input text sequence.
- `target_text`: The target text sequence
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.
args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.
eval_data (optional): A DataFrame against which evaluation will be performed when evaluate_during_training is enabled. Is required if evaluate_during_training is enabled.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down training significantly as the predicted sequences need to be generated.
Returns:
None
""" # noqa: ignore flake8"
if args:
self.args.update_from_dict(args)
# if self.args.silent:
# show_running_loss = False
if self.args.evaluate_during_training and eval_data is None:
raise ValueError(
"evaluate_during_training is enabled but eval_data is not specified."
" Pass eval_data to model.train_model() if using evaluate_during_training."
)
if not output_dir:
output_dir = self.args.output_dir
if os.path.exists(output_dir) and os.listdir(output_dir) and not self.args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty."
" Set args.overwrite_output_dir = True to overcome.".format(output_dir)
)
self._move_model_to_device()
train_dataset = self.load_and_cache_examples(train_data, verbose=verbose)
os.makedirs(output_dir, exist_ok=True)
global_step, tr_loss = self.train(
train_dataset,
output_dir,
show_running_loss=show_running_loss,
eval_data=eval_data,
verbose=verbose,
**kwargs,
)
self._save_model(self.args.output_dir, model=self.model)
# model_to_save = self.model.module if hasattr(self.model, "module") else self.model
# model_to_save.save_pretrained(output_dir)
# self.encoder_tokenizer.save_pretrained(output_dir)
# self.decoder_tokenizer.save_pretrained(output_dir)
# torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
if verbose:
logger.info(" Training of {} model complete. Saved to {}.".format(self.args.model_name, output_dir))
    def train(
        self, train_dataset, output_dir, show_running_loss=True, eval_data=None, verbose=True, **kwargs,
    ):
        """
        Trains the model on train_dataset.

        Utility function to be used by the train_model() method. Not intended to be used directly.

        Returns:
            (global_step, average training loss) tuple.
        """
        model = self.model
        args = self.args

        tb_writer = SummaryWriter(logdir=args.tensorboard_dir)
        train_sampler = RandomSampler(train_dataset)
        train_dataloader = DataLoader(
            train_dataset,
            sampler=train_sampler,
            batch_size=args.train_batch_size,
            num_workers=self.args.dataloader_num_workers,
        )

        # Total optimization steps: either capped explicitly by max_steps
        # (which then also determines the epoch count) or derived from epochs.
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        # Parameters listed here never receive weight decay.
        no_decay = ["bias", "LayerNorm.weight"]

        # Build optimizer parameter groups: caller-defined custom groups and
        # per-layer groups are collected first; their names are tracked so the
        # default groups below do not include them twice.
        optimizer_grouped_parameters = []
        custom_parameter_names = set()
        for group in self.args.custom_parameter_groups:
            params = group.pop("params")
            custom_parameter_names.update(params)
            param_group = {**group}
            param_group["params"] = [p for n, p in model.named_parameters() if n in params]
            optimizer_grouped_parameters.append(param_group)

        for group in self.args.custom_layer_parameters:
            layer_number = group.pop("layer")
            layer = f"layer.{layer_number}."
            # Each custom layer yields two groups: with and without weight decay.
            group_d = {**group}
            group_nd = {**group}
            group_nd["weight_decay"] = 0.0
            params_d = []
            params_nd = []
            for n, p in model.named_parameters():
                if n not in custom_parameter_names and layer in n:
                    if any(nd in n for nd in no_decay):
                        params_nd.append(p)
                    else:
                        params_d.append(p)
                    custom_parameter_names.add(n)
            group_d["params"] = params_d
            group_nd["params"] = params_nd

            optimizer_grouped_parameters.append(group_d)
            optimizer_grouped_parameters.append(group_nd)

        # Remaining parameters fall into the default decay/no-decay groups
        # unless only custom parameters should be trained.
        if not self.args.train_custom_parameters_only:
            optimizer_grouped_parameters.extend(
                [
                    {
                        "params": [
                            p
                            for n, p in model.named_parameters()
                            if n not in custom_parameter_names and not any(nd in n for nd in no_decay)
                        ],
                        "weight_decay": args.weight_decay,
                    },
                    {
                        "params": [
                            p
                            for n, p in model.named_parameters()
                            if n not in custom_parameter_names and any(nd in n for nd in no_decay)
                        ],
                        "weight_decay": 0.0,
                    },
                ]
            )

        # warmup_steps may be given directly or derived from warmup_ratio.
        warmup_steps = math.ceil(t_total * args.warmup_ratio)
        args.warmup_steps = warmup_steps if args.warmup_steps == 0 else args.warmup_steps

        # TODO: Use custom optimizer like with BertSum?
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

        # Resume optimizer/scheduler state when continuing from a checkpoint dir.
        if (
            args.model_name
            and os.path.isfile(os.path.join(args.model_name, "optimizer.pt"))
            and os.path.isfile(os.path.join(args.model_name, "scheduler.pt"))
        ):
            # Load in optimizer and scheduler states
            optimizer.load_state_dict(torch.load(os.path.join(args.model_name, "optimizer.pt")))
            scheduler.load_state_dict(torch.load(os.path.join(args.model_name, "scheduler.pt")))

        if args.n_gpu > 1:
            model = torch.nn.DataParallel(model)

        logger.info(" Training started")

        global_step = 0
        tr_loss, logging_loss = 0.0, 0.0
        model.zero_grad()
        train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.silent, mininterval=0)
        epoch_number = 0
        best_eval_metric = None
        early_stopping_counter = 0
        steps_trained_in_current_epoch = 0
        epochs_trained = 0

        # When resuming from "checkpoint-<step>[-epoch-<n>]" directories, parse
        # the step count out of the directory name to skip completed work.
        if args.model_name and os.path.exists(args.model_name):
            try:
                # set global_step to gobal_step of last saved checkpoint from model path
                checkpoint_suffix = args.model_name.split("/")[-1].split("-")
                if len(checkpoint_suffix) > 2:
                    checkpoint_suffix = checkpoint_suffix[1]
                else:
                    checkpoint_suffix = checkpoint_suffix[-1]
                global_step = int(checkpoint_suffix)
                epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
                steps_trained_in_current_epoch = global_step % (
                    len(train_dataloader) // args.gradient_accumulation_steps
                )

                logger.info(" Continuing training from checkpoint, will skip to saved global_step")
                logger.info(" Continuing training from epoch %d", epochs_trained)
                logger.info(" Continuing training from global step %d", global_step)
                logger.info(" Will skip the first %d steps in the current epoch", steps_trained_in_current_epoch)
            except ValueError:
                logger.info(" Starting fine-tuning.")

        if args.evaluate_during_training:
            training_progress_scores = self._create_training_progress_scores(**kwargs)

        if args.wandb_project:
            wandb.init(project=args.wandb_project, config={**asdict(args)}, **args.wandb_kwargs)
            wandb.watch(self.model)

        if args.fp16:
            from torch.cuda import amp

            scaler = amp.GradScaler()

        model.train()
        for current_epoch in train_iterator:
            # Skip epochs already completed when resuming from a checkpoint.
            if epochs_trained > 0:
                epochs_trained -= 1
                continue
            train_iterator.set_description(f"Epoch {epoch_number + 1} of {args.num_train_epochs}")
            batch_iterator = tqdm(
                train_dataloader,
                desc=f"Running Epoch {epoch_number} of {args.num_train_epochs}",
                disable=args.silent,
                mininterval=0,
            )
            for step, batch in enumerate(batch_iterator):
                # Skip batches already completed in a partially-trained epoch.
                if steps_trained_in_current_epoch > 0:
                    steps_trained_in_current_epoch -= 1
                    continue
                # batch = tuple(t.to(device) for t in batch)

                inputs = self._get_inputs_dict(batch)
                if args.fp16:
                    with amp.autocast():
                        outputs = model(**inputs)
                        # model outputs are always tuple in pytorch-transformers (see doc)
                        loss = outputs[0]
                else:
                    outputs = model(**inputs)
                    # model outputs are always tuple in pytorch-transformers (see doc)
                    loss = outputs[0]

                if args.n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu parallel training

                current_loss = loss.item()

                if show_running_loss:
                    batch_iterator.set_description(
                        f"Epochs {epoch_number}/{args.num_train_epochs}. Running Loss: {current_loss:9.4f}"
                    )

                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    scaler.scale(loss).backward()
                else:
                    loss.backward()

                tr_loss += loss.item()
                # Only step the optimizer every gradient_accumulation_steps batches.
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        scaler.unscale_(optimizer)
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

                    if args.fp16:
                        scaler.step(optimizer)
                        scaler.update()
                    else:
                        optimizer.step()
                    scheduler.step()  # Update learning rate schedule
                    model.zero_grad()
                    global_step += 1

                    if args.logging_steps > 0 and global_step % args.logging_steps == 0:
                        # Log metrics
                        tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
                        tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
                        logging_loss = tr_loss
                        if args.wandb_project:
                            wandb.log(
                                {
                                    "Training loss": current_loss,
                                    "lr": scheduler.get_lr()[0],
                                    "global_step": global_step,
                                }
                            )

                    if args.save_steps > 0 and global_step % args.save_steps == 0:
                        # Save model checkpoint
                        output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))

                        self._save_model(output_dir_current, optimizer, scheduler, model=model)

                    # Mid-epoch evaluation + early stopping bookkeeping.
                    if args.evaluate_during_training and (
                        args.evaluate_during_training_steps > 0
                        and global_step % args.evaluate_during_training_steps == 0
                    ):
                        # Only evaluate when single GPU otherwise metrics may not average well
                        results = self.eval_model(
                            eval_data,
                            verbose=verbose and args.evaluate_during_training_verbose,
                            silent=args.evaluate_during_training_silent,
                            **kwargs,
                        )
                        for key, value in results.items():
                            tb_writer.add_scalar("eval_{}".format(key), value, global_step)

                        output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))

                        if args.save_eval_checkpoints:
                            self._save_model(output_dir_current, optimizer, scheduler, model=model, results=results)

                        training_progress_scores["global_step"].append(global_step)
                        training_progress_scores["train_loss"].append(current_loss)
                        for key in results:
                            training_progress_scores[key].append(results[key])
                        report = pd.DataFrame(training_progress_scores)
                        report.to_csv(
                            os.path.join(args.output_dir, "training_progress_scores.csv"), index=False,
                        )

                        if args.wandb_project:
                            wandb.log(self._get_last_metrics(training_progress_scores))

                        # First evaluation establishes the best metric so far.
                        if not best_eval_metric:
                            best_eval_metric = results[args.early_stopping_metric]
                            if args.save_best_model:
                                self._save_model(
                                    args.best_model_dir, optimizer, scheduler, model=model, results=results
                                )
                        # Early stopping: "minimize" branch tracks decreases of the
                        # metric, the else-branch tracks increases.
                        if best_eval_metric and args.early_stopping_metric_minimize:
                            if results[args.early_stopping_metric] - best_eval_metric < args.early_stopping_delta:
                                best_eval_metric = results[args.early_stopping_metric]
                                if args.save_best_model:
                                    self._save_model(
                                        args.best_model_dir, optimizer, scheduler, model=model, results=results
                                    )
                                early_stopping_counter = 0
                            else:
                                if args.use_early_stopping:
                                    if early_stopping_counter < args.early_stopping_patience:
                                        early_stopping_counter += 1
                                        if verbose:
                                            logger.info(f" No improvement in {args.early_stopping_metric}")
                                            logger.info(f" Current step: {early_stopping_counter}")
                                            logger.info(f" Early stopping patience: {args.early_stopping_patience}")
                                    else:
                                        if verbose:
                                            logger.info(f" Patience of {args.early_stopping_patience} steps reached")
                                            logger.info(" Training terminated.")
                                        train_iterator.close()
                                        return global_step, tr_loss / global_step
                        else:
                            if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:
                                best_eval_metric = results[args.early_stopping_metric]
                                if args.save_best_model:
                                    self._save_model(
                                        args.best_model_dir, optimizer, scheduler, model=model, results=results
                                    )
                                early_stopping_counter = 0
                            else:
                                if args.use_early_stopping:
                                    if early_stopping_counter < args.early_stopping_patience:
                                        early_stopping_counter += 1
                                        if verbose:
                                            logger.info(f" No improvement in {args.early_stopping_metric}")
                                            logger.info(f" Current step: {early_stopping_counter}")
                                            logger.info(f" Early stopping patience: {args.early_stopping_patience}")
                                    else:
                                        if verbose:
                                            logger.info(f" Patience of {args.early_stopping_patience} steps reached")
                                            logger.info(" Training terminated.")
                                        train_iterator.close()
                                        return global_step, tr_loss / global_step

            epoch_number += 1
            output_dir_current = os.path.join(output_dir, "checkpoint-{}-epoch-{}".format(global_step, epoch_number))

            if args.save_model_every_epoch or args.evaluate_during_training:
                os.makedirs(output_dir_current, exist_ok=True)

            if args.save_model_every_epoch:
                self._save_model(output_dir_current, optimizer, scheduler, model=model)

            # End-of-epoch evaluation mirrors the mid-epoch block above, with
            # patience counted only when early_stopping_consider_epochs is set.
            if args.evaluate_during_training:
                results = self.eval_model(
                    eval_data,
                    verbose=verbose and args.evaluate_during_training_verbose,
                    silent=args.evaluate_during_training_silent,
                    **kwargs,
                )

                if args.save_eval_checkpoints:
                    self._save_model(output_dir_current, optimizer, scheduler, results=results)

                training_progress_scores["global_step"].append(global_step)
                training_progress_scores["train_loss"].append(current_loss)
                for key in results:
                    training_progress_scores[key].append(results[key])
                report = pd.DataFrame(training_progress_scores)
                report.to_csv(os.path.join(args.output_dir, "training_progress_scores.csv"), index=False)

                if args.wandb_project:
                    wandb.log(self._get_last_metrics(training_progress_scores))

                if not best_eval_metric:
                    best_eval_metric = results[args.early_stopping_metric]
                    if args.save_best_model:
                        self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
                if best_eval_metric and args.early_stopping_metric_minimize:
                    if results[args.early_stopping_metric] - best_eval_metric < args.early_stopping_delta:
                        best_eval_metric = results[args.early_stopping_metric]
                        if args.save_best_model:
                            self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
                        early_stopping_counter = 0
                    else:
                        if args.use_early_stopping and args.early_stopping_consider_epochs:
                            if early_stopping_counter < args.early_stopping_patience:
                                early_stopping_counter += 1
                                if verbose:
                                    logger.info(f" No improvement in {args.early_stopping_metric}")
                                    logger.info(f" Current step: {early_stopping_counter}")
                                    logger.info(f" Early stopping patience: {args.early_stopping_patience}")
                            else:
                                if verbose:
                                    logger.info(f" Patience of {args.early_stopping_patience} steps reached")
                                    logger.info(" Training terminated.")
                                train_iterator.close()
                                return global_step, tr_loss / global_step
                else:
                    if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:
                        best_eval_metric = results[args.early_stopping_metric]
                        if args.save_best_model:
                            self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
                        early_stopping_counter = 0
                    else:
                        if args.use_early_stopping and args.early_stopping_consider_epochs:
                            if early_stopping_counter < args.early_stopping_patience:
                                early_stopping_counter += 1
                                if verbose:
                                    logger.info(f" No improvement in {args.early_stopping_metric}")
                                    logger.info(f" Current step: {early_stopping_counter}")
                                    logger.info(f" Early stopping patience: {args.early_stopping_patience}")
                            else:
                                if verbose:
                                    logger.info(f" Patience of {args.early_stopping_patience} steps reached")
                                    logger.info(" Training terminated.")
                                train_iterator.close()
                                return global_step, tr_loss / global_step

        return global_step, tr_loss / global_step
    def eval_model(self, eval_data, output_dir=None, verbose=True, silent=False, **kwargs):
        """
        Evaluates the model on eval_data. Saves results to output_dir.

        Args:
            eval_data: Pandas DataFrame containing the 2 columns - `input_text`, `target_text`.
                - `input_text`: The input text sequence.
                - `target_text`: The target text sequence.
            output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
            verbose: If verbose, results will be printed to the console on completion of evaluation.
            silent: If silent, tqdm progress bars will be hidden.
            **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
                A metric function should take in two parameters: the true labels and the predictions, both lists of strings.
                Note that this will slow down evaluation significantly as the predicted sequences need to be generated.
        Returns:
            results: Dictionary containing evaluation results.
        """  # noqa: ignore flake8"
        if not output_dir:
            output_dir = self.args.output_dir
        self._move_model_to_device()
        eval_dataset = self.load_and_cache_examples(eval_data, evaluate=True, verbose=verbose, silent=silent)
        os.makedirs(output_dir, exist_ok=True)
        # Loss-based evaluation on the tokenized dataset.
        result = self.evaluate(eval_dataset, output_dir, verbose=verbose, silent=silent, **kwargs)
        self.results.update(result)
        if self.args.evaluate_generated_text:
            # Generation-based metrics: generate a prediction for every input
            # and score it with the user-supplied metric functions in kwargs.
            to_predict = eval_data["input_text"].tolist()
            preds = self.predict(to_predict)
            result = self.compute_metrics(eval_data["target_text"].tolist(), preds, **kwargs)
            self.results.update(result)
        if verbose:
            logger.info(self.results)
        return self.results
    def evaluate(self, eval_dataset, output_dir, verbose=True, silent=False, **kwargs):
        """
        Evaluates the model on eval_dataset (mean loss only).
        Utility function to be used by the eval_model() method. Not intended to be used directly.
        """
        model = self.model
        args = self.args
        eval_output_dir = output_dir
        results = {}
        # Sequential sampling: order does not affect the loss, but keeps runs deterministic.
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        if args.n_gpu > 1:
            model = torch.nn.DataParallel(model)
        eval_loss = 0.0
        nb_eval_steps = 0
        model.eval()
        for batch in tqdm(eval_dataloader, disable=args.silent or silent, desc="Running Evaluation"):
            # batch = tuple(t.to(device) for t in batch)
            inputs = self._get_inputs_dict(batch)
            with torch.no_grad():
                outputs = model(**inputs)
                loss = outputs[0]
                # .mean() averages per-GPU losses under DataParallel.
                eval_loss += loss.mean().item()
            nb_eval_steps += 1
        # Average the loss over all evaluation batches.
        eval_loss = eval_loss / nb_eval_steps
        results["eval_loss"] = eval_loss
        output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            for key in sorted(results.keys()):
                writer.write("{} = {}\n".format(key, str(results[key])))
        return results
    def predict(self, to_predict):
        """
        Performs predictions on a list of text.

        Args:
            to_predict: A python list of text (str) to be sent to the model for prediction. Note that the prefix should be prepended to the text.
        Returns:
            preds: A python list of the generated sequences.
        """  # noqa: ignore flake8"
        self._move_model_to_device()
        all_outputs = []
        # Batching: slice the input list into chunks of eval_batch_size.
        for batch in [
            to_predict[i : i + self.args.eval_batch_size] for i in range(0, len(to_predict), self.args.eval_batch_size)
        ]:
            # Marian uses a dedicated batch-preparation API; other models use batch_encode_plus.
            if self.args.model_type == "marian":
                input_ids = self.encoder_tokenizer.prepare_translation_batch(
                    batch, max_length=self.args.max_seq_length, pad_to_max_length=True, return_tensors="pt",
                )["input_ids"]
            else:
                input_ids = self.encoder_tokenizer.batch_encode_plus(
                    batch, max_length=self.args.max_seq_length, pad_to_max_length=True, return_tensors="pt",
                )["input_ids"]
            input_ids = input_ids.to(self.device)
            if self.args.model_type in ["bart", "marian"]:
                outputs = self.model.generate(
                    input_ids=input_ids,
                    num_beams=self.args.num_beams,
                    max_length=self.args.max_length,
                    length_penalty=self.args.length_penalty,
                    early_stopping=self.args.early_stopping,
                    repetition_penalty=self.args.repetition_penalty,
                    do_sample=self.args.do_sample,
                    top_k=self.args.top_k,
                    top_p=self.args.top_p,
                    num_return_sequences=self.args.num_return_sequences,
                )
            else:
                # Generic encoder-decoder models need an explicit decoder start token.
                outputs = self.model.generate(
                    input_ids=input_ids,
                    decoder_start_token_id=self.model.config.decoder.pad_token_id,
                    num_beams=self.args.num_beams,
                    max_length=self.args.max_length,
                    length_penalty=self.args.length_penalty,
                    early_stopping=self.args.early_stopping,
                    repetition_penalty=self.args.repetition_penalty,
                    do_sample=self.args.do_sample,
                    top_k=self.args.top_k,
                    top_p=self.args.top_p,
                    num_return_sequences=self.args.num_return_sequences,
                )
            all_outputs.extend(outputs.cpu().numpy())
        if self.args.use_multiprocessed_decoding:
            # Move the model off the GPU so worker processes don't inherit CUDA state.
            self.model.to("cpu")
            with Pool(self.args.process_count) as p:
                outputs = list(
                    tqdm(
                        p.imap(self._decode, all_outputs, chunksize=self.args.multiprocessing_chunksize),
                        total=len(all_outputs),
                        desc="Decoding outputs",
                        disable=self.args.silent,
                    )
                )
            self._move_model_to_device()
        else:
            outputs = [
                self.decoder_tokenizer.decode(output_id, skip_special_tokens=True, clean_up_tokenization_spaces=True)
                for output_id in all_outputs
            ]
        if self.args.num_return_sequences > 1:
            # Group the flat output list so each input maps to its num_return_sequences generations.
            return [
                outputs[i : i + self.args.num_return_sequences]
                for i in range(0, len(outputs), self.args.num_return_sequences)
            ]
        else:
            return outputs
def _decode(self, output_id):
return self.decoder_tokenizer.decode(output_id, skip_special_tokens=True, clean_up_tokenization_spaces=True)
def compute_metrics(self, labels, preds, **kwargs):
"""
Computes the evaluation metrics for the model predictions.
Args:
labels: List of target sequences
preds: List of model generated outputs
**kwargs: Custom metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.
Returns:
result: Dictionary containing evaluation results.
""" # noqa: ignore flake8"
# assert len(labels) == len(preds)
results = {}
for metric, func in kwargs.items():
results[metric] = func(labels, preds)
return results
    def load_and_cache_examples(self, data, evaluate=False, no_cache=False, verbose=True, silent=False):
        """
        Creates a Dataset from `data` (custom, summarization, or generic seq2seq).
        Utility function for train() and eval() methods. Not intended to be used directly.
        """
        encoder_tokenizer = self.encoder_tokenizer
        decoder_tokenizer = self.decoder_tokenizer
        args = self.args
        # The argument overrides args.no_cache only when explicitly truthy.
        if not no_cache:
            no_cache = args.no_cache
        if not no_cache:
            os.makedirs(self.args.cache_dir, exist_ok=True)
        mode = "dev" if evaluate else "train"
        if args.dataset_class:
            # A user-supplied Dataset class takes precedence over the built-ins.
            CustomDataset = args.dataset_class
            return CustomDataset(encoder_tokenizer, decoder_tokenizer, args, data, mode)
        else:
            if args.model_type in ["bart", "marian"]:
                return SimpleSummarizationDataset(encoder_tokenizer, self.args, data, mode)
            else:
                return Seq2SeqDataset(encoder_tokenizer, decoder_tokenizer, self.args, data, mode,)
def _create_training_progress_scores(self, **kwargs):
extra_metrics = {key: [] for key in kwargs}
training_progress_scores = {
"global_step": [],
"eval_loss": [],
"train_loss": [],
**extra_metrics,
}
return training_progress_scores
def _get_last_metrics(self, metric_values):
return {metric: values[-1] for metric, values in metric_values.items()}
    def _save_model(self, output_dir=None, optimizer=None, scheduler=None, model=None, results=None):
        """
        Save model weights, tokenizer(s), configs, and args to output_dir; optionally also
        the optimizer/scheduler state and an eval_results.txt with `results`.
        """
        if not output_dir:
            output_dir = self.args.output_dir
        os.makedirs(output_dir, exist_ok=True)
        logger.info(f"Saving model into {output_dir}")
        if model and not self.args.no_save:
            # Take care of distributed/parallel training
            model_to_save = model.module if hasattr(model, "module") else model
            self._save_model_args(output_dir)
            if self.args.model_type in ["bart", "marian"]:
                # Single-model types save everything into output_dir directly.
                os.makedirs(os.path.join(output_dir), exist_ok=True)
                model_to_save.save_pretrained(output_dir)
                self.config.save_pretrained(output_dir)
                if self.args.model_type == "bart":
                    self.encoder_tokenizer.save_pretrained(output_dir)
            else:
                # Generic encoder-decoder models are saved as two sub-directories.
                os.makedirs(os.path.join(output_dir, "encoder"), exist_ok=True)
                os.makedirs(os.path.join(output_dir, "decoder"), exist_ok=True)
                self.encoder_config.save_pretrained(os.path.join(output_dir, "encoder"))
                self.decoder_config.save_pretrained(os.path.join(output_dir, "decoder"))
                model_to_save = (
                    self.model.encoder.module if hasattr(self.model.encoder, "module") else self.model.encoder
                )
                model_to_save.save_pretrained(os.path.join(output_dir, "encoder"))
                model_to_save = (
                    self.model.decoder.module if hasattr(self.model.decoder, "module") else self.model.decoder
                )
                model_to_save.save_pretrained(os.path.join(output_dir, "decoder"))
                self.encoder_tokenizer.save_pretrained(os.path.join(output_dir, "encoder"))
                self.decoder_tokenizer.save_pretrained(os.path.join(output_dir, "decoder"))
            torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
            if optimizer and scheduler and self.args.save_optimizer_and_scheduler:
                torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
        if results:
            output_eval_file = os.path.join(output_dir, "eval_results.txt")
            with open(output_eval_file, "w") as writer:
                for key in sorted(results.keys()):
                    writer.write("{} = {}\n".format(key, str(results[key])))
    def _move_model_to_device(self):
        # Place the model on the configured device (CPU or CUDA).
        self.model.to(self.device)
    def _get_inputs_dict(self, batch):
        """
        Convert a dataloader batch into the keyword-argument dict expected by the
        model's forward pass, moving all tensors to the configured device.
        """
        device = self.device
        if self.args.model_type in ["bart", "marian"]:
            pad_token_id = self.encoder_tokenizer.pad_token_id
            source_ids, source_mask, y = batch["source_ids"], batch["source_mask"], batch["target_ids"]
            # Teacher forcing: decoder input is the target sequence without its last token...
            y_ids = y[:, :-1].contiguous()
            # ...and the labels are the target shifted left by one position.
            lm_labels = y[:, 1:].clone()
            # -100 is the index ignored by the loss, so padding does not contribute.
            lm_labels[y[:, 1:] == pad_token_id] = -100
            inputs = {
                "input_ids": source_ids.to(device),
                "attention_mask": source_mask.to(device),
                "decoder_input_ids": y_ids.to(device),
                "lm_labels": lm_labels.to(device),
            }
        else:
            # Generic encoder-decoder batches are (input_ids, target_ids) tuples.
            lm_labels = batch[1]
            lm_labels_masked = lm_labels.clone()
            lm_labels_masked[lm_labels_masked == self.decoder_tokenizer.pad_token_id] = -100
            inputs = {
                "input_ids": batch[0].to(device),
                "decoder_input_ids": lm_labels.to(device),
                "labels": lm_labels_masked.to(device),
            }
        return inputs
    def _save_model_args(self, output_dir):
        # Persist the model args alongside the saved model so they can be reloaded later.
        os.makedirs(output_dir, exist_ok=True)
        self.args.save(output_dir)
    def _load_model_args(self, input_dir):
        # Load a Seq2SeqArgs instance previously persisted with _save_model_args().
        args = Seq2SeqArgs()
        args.load(input_dir)
        return args
def get_named_parameters(self):
return [n for n, p in self.model.named_parameters()]
| simpletransformers/seq2seq/seq2seq_model.py | 46,925 | Initializes a Seq2SeqModel.
Args:
encoder_type (optional): The type of model to use as the encoder.
encoder_name (optional): The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.
decoder_name (optional): The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.
Must be the same "size" as the encoder model (base/base, large/large, etc.)
encoder_decoder_type (optional): The type of encoder-decoder model. (E.g. bart)
encoder_decoder_name (optional): The path to a directory containing the saved encoder and decoder of a Seq2SeqModel. (E.g. "outputs/") OR a valid BART or MarianMT model.
config (optional): A configuration file to build an EncoderDecoderModel.
args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.
use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.
cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.
**kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.
Computes the evaluation metrics for the model predictions.
Args:
labels: List of target sequences
preds: List of model generated outputs
**kwargs: Custom metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.
Returns:
result: Dictionary containing evaluation results.
Evaluates the model on eval_data. Saves results to output_dir.
Args:
eval_data: Pandas DataFrame containing the 2 columns - `input_text`, `target_text`.
- `input_text`: The input text sequence.
- `target_text`: The target text sequence.
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
verbose: If verbose, results will be printed to the console on completion of evaluation.
silent: If silent, tqdm progress bars will be hidden.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.
Returns:
results: Dictionary containing evaluation results.
Evaluates the model on eval_dataset.
Utility function to be used by the eval_model() method. Not intended to be used directly.
Creates a T5Dataset from data.
Utility function for train() and eval() methods. Not intended to be used directly.
Performs predictions on a list of text.
Args:
to_predict: A python list of text (str) to be sent to the model for prediction. Note that the prefix should be prepended to the text.
Returns:
preds: A python list of the generated sequences.
Trains the model on train_dataset.
Utility function to be used by the train_model() method. Not intended to be used directly.
Trains the model using 'train_data'
Args:
train_data: Pandas DataFrame containing the 2 columns - `input_text`, `target_text`.
- `input_text`: The input text sequence.
- `target_text`: The target text sequence
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.
args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.
eval_data (optional): A DataFrame against which evaluation will be performed when evaluate_during_training is enabled. Is required if evaluate_during_training is enabled.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down training significantly as the predicted sequences need to be generated.
Returns:
None
noqa: ignore flake8" if not ((encoder_name and decoder_name) or encoder_decoder_name) and not encoder_type: config = EncoderDecoderConfig.from_encoder_decoder_configs(config, config) self.model = EncoderDecoderModel.from_pretrained(encoder_decoder_name) Checking if we are loading from a saved model or using a pre-trained model if not saved_model_args and encoder_decoder_type == "marian": Need to store base pre-trained model name to get the tokenizer when loading a saved model noqa: ignore flake8" if self.args.silent: show_running_loss = False model_to_save = self.model.module if hasattr(self.model, "module") else self.model model_to_save.save_pretrained(output_dir) self.encoder_tokenizer.save_pretrained(output_dir) self.decoder_tokenizer.save_pretrained(output_dir) torch.save(self.args, os.path.join(output_dir, "training_args.bin")) TODO: Use custom optimizer like with BertSum? Load in optimizer and scheduler states set global_step to gobal_step of last saved checkpoint from model path batch = tuple(t.to(device) for t in batch) model outputs are always tuple in pytorch-transformers (see doc) model outputs are always tuple in pytorch-transformers (see doc) mean() to average on multi-gpu parallel training Update learning rate schedule Log metrics Save model checkpoint Only evaluate when single GPU otherwise metrics may not average well noqa: ignore flake8" batch = tuple(t.to(device) for t in batch) noqa: ignore flake8" Batching noqa: ignore flake8" assert len(labels) == len(preds) Take care of distributed/parallel training | 6,662 | en | 0.662834 |
from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTest(TestCase):

    def test_create_user_with_email_successful(self):
        """Creating a user with an email address and password succeeds."""
        email = 'test@testemail.com'
        password = 'testpassword'
        new_user = get_user_model().objects.create_user(
            email=email,
            password=password,
        )
        self.assertEqual(new_user.email, email)
        self.assertTrue(new_user.check_password(password))

    def test_new_user_email_normalized(self):
        """The domain part of a new user's email is normalized to lower case."""
        email = 'test@TESTEMAIL.COM'
        created = get_user_model().objects.create_user(email, 'testpw123')
        self.assertEqual(created.email, email.lower())

    def test_new_user_missing_email(self):
        """Creating a user without an email raises ValueError."""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, 'testpw123')

    def test_create_new_superuser(self):
        """create_superuser marks the new user as both superuser and staff."""
        admin = get_user_model().objects.create_superuser(
            'testsuperuser@admin.com',
            'testpw123',
        )
        self.assertTrue(admin.is_superuser)
        self.assertTrue(admin.is_staff)
| shoppingmall/core/tests/test_models.py | 1,352 | Superuser를 생성하는 테스트
이메일로 유저 생성을 성공하는 테스트
이메일이 표준 형식으로 들어오는 테스트
이메일이 입력되지 않았을 때 에러가 발생하는 테스트 | 91 | ko | 1.00007 |
# Generated by Django 2.1.2 on 2019-02-05 08:07
from django.db import migrations
class Migration(migrations.Migration):
    # Merge migration: reconciles the two divergent migration branches
    # 0038_merge_20190203_1423 and 0039_auto_20190205_0609.
    # No schema operations are performed.
    dependencies = [
        ('core', '0038_merge_20190203_1423'),
        ('core', '0039_auto_20190205_0609'),
    ]
    operations = [
    ]
| src/core/migrations/0040_merge_20190205_0807.py | 268 | Generated by Django 2.1.2 on 2019-02-05 08:07 | 45 | en | 0.579368 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Entry point for both build and try bots.
This script is invoked from XXX, usually without arguments
to package an SDK. It automatically determines whether
this SDK is for mac, win, linux.
The script inspects the following environment variables:
BUILDBOT_BUILDERNAME to determine whether the script is run locally
and whether it should upload an SDK to file storage (GSTORE)
"""
# pylint: disable=W0621
# std python includes
import datetime
import glob
import optparse
import os
import re
import sys
# Bail out early on interpreters older than 2.6; the SDK build scripts
# depend on 2.6+ features.
if sys.version_info < (2, 6, 0):
  sys.stderr.write("python 2.6 or later is required run this script\n")
  sys.exit(1)
# local includes
import buildbot_common
import build_projects
import build_updater
import build_version
import generate_make
import generate_notice
import manifest_util
import parse_dsc
import verify_filelist
from build_paths import SCRIPT_DIR, SDK_SRC_DIR, SRC_DIR, NACL_DIR, OUT_DIR
from build_paths import NACLPORTS_DIR, GSTORE, GONACL_APPENGINE_SRC_DIR
# Add SDK make tools scripts to the python path.
sys.path.append(os.path.join(SDK_SRC_DIR, 'tools'))
sys.path.append(os.path.join(NACL_DIR, 'build'))
import getos
import oshelpers
# Path to the cygwin-aware tar wrapper used to unpack toolchain tarballs.
CYGTAR = os.path.join(NACL_DIR, 'build', 'cygtar.py')
# Location and pinned revision of the naclports source tree.
NACLPORTS_URL = 'https://naclports.googlecode.com/svn/trunk/src'
NACLPORTS_REV = 1152
# Name of the gyp-generated build output directory under OUT_DIR.
GYPBUILD_DIR = 'gypbuild'
# Module-global parsed command-line options; assigned after argument
# parsing by the script entry point (not shown in this chunk).
options = None
def GetGlibcToolchain():
  """Return the path to the downloaded glibc toolchain tarball for this host."""
  tarname = 'toolchain_%s_x86.tar.bz2' % getos.GetPlatform()
  return os.path.join(NACL_DIR, 'toolchain', '.tars', tarname)
def GetNewlibToolchain():
  """Return the path to the downloaded newlib toolchain tarball for this host."""
  tarname = 'naclsdk_%s_x86.tgz' % getos.GetPlatform()
  return os.path.join(NACL_DIR, 'toolchain', '.tars', tarname)
def GetBionicToolchain():
  """Return the path to the downloaded ARM bionic toolchain tarball."""
  tarname = 'naclsdk_%s_arm_bionic.tgz' % getos.GetPlatform()
  return os.path.join(NACL_DIR, 'toolchain', '.tars', tarname)
def GetPNaClToolchain():
  """Return the path to the downloaded PNaCl toolchain tarball for this host."""
  tarname = 'naclsdk_pnacl_%s_x86.tgz' % getos.GetPlatform()
  return os.path.join(NACL_DIR, 'toolchain', '.tars', tarname)
def GetToolchainNaClInclude(tcname, tcpath, arch):
  """Return the NaCl header install directory inside a toolchain.

  Exits via buildbot_common.ErrorExit on an unknown architecture.
  """
  if arch == 'arm':
    return os.path.join(tcpath, 'arm-nacl', 'include')
  if arch == 'x86':
    subdirs = ('sdk', 'include') if tcname == 'pnacl' else ('x86_64-nacl', 'include')
    return os.path.join(tcpath, *subdirs)
  buildbot_common.ErrorExit('Unknown architecture: %s' % arch)
def GetGypGenDir(xarch):
  """Return the gyp 'gen' output directory for the given architecture build."""
  suffix = '-arm' if xarch == 'arm' else ''
  return os.path.join(OUT_DIR, GYPBUILD_DIR + suffix, 'Release', 'gen')
def GetGypBuiltLib(tcname, xarch=None):
  """Return the gyp-built library directory for a toolchain/arch pair."""
  name = 'pnacl_newlib' if tcname == 'pnacl' else tcname
  arch_suffix = xarch or ''
  return os.path.join(GetGypGenDir(arch_suffix), 'tc_' + name, 'lib' + arch_suffix)
def GetToolchainNaClLib(tcname, tcpath, xarch):
  """Return the NaCl library install directory inside a toolchain.

  Args:
    tcname: Toolchain name ('pnacl', 'newlib', 'glibc').
    tcpath: Root directory of the toolchain.
    xarch: Architecture suffix: '32', '64', or 'arm' (ignored for pnacl).

  Returns:
    The directory that holds the toolchain's NaCl libraries.
  """
  if tcname == 'pnacl':
    return os.path.join(tcpath, 'sdk', 'lib')
  elif xarch == '32':
    return os.path.join(tcpath, 'x86_64-nacl', 'lib32')
  elif xarch == '64':
    return os.path.join(tcpath, 'x86_64-nacl', 'lib')
  elif xarch == 'arm':
    return os.path.join(tcpath, 'arm-nacl', 'lib')
  else:
    # Previously this fell off the end and silently returned None; fail
    # loudly instead, matching GetToolchainNaClInclude/GetPNaClNativeLib.
    buildbot_common.ErrorExit('Unknown architecture: %s' % xarch)
def GetToolchainDirName(tcname, xarch):
  """Return the directory name used for a toolchain under 'toolchain/'."""
  platform = getos.GetPlatform()
  if tcname == 'pnacl':
    return '%s_%s' % (platform, tcname)
  arch_part = 'arm' if xarch == 'arm' else 'x86'
  return '%s_%s_%s' % (platform, arch_part, tcname)
def GetGypToolchainLib(tcname, xarch):
  """Return the NaCl library directory inside the gyp-generated toolchain."""
  toolchain_root = os.path.join(GetGypGenDir(xarch), 'sdk', 'toolchain',
                                GetToolchainDirName(tcname, xarch))
  return GetToolchainNaClLib(tcname, toolchain_root, xarch)
def GetOutputToolchainLib(pepperdir, tcname, xarch):
  """Return the NaCl library directory inside the SDK output toolchain."""
  toolchain_root = os.path.join(pepperdir, 'toolchain',
                                GetToolchainDirName(tcname, xarch))
  return GetToolchainNaClLib(tcname, toolchain_root, xarch)
def GetPNaClNativeLib(tcpath, arch):
  """Return the PNaCl native library directory for the given architecture."""
  if arch not in ('arm', 'x86-32', 'x86-64'):
    buildbot_common.ErrorExit('Unknown architecture %s.' % arch)
  return os.path.join(tcpath, 'lib-' + arch)
def BuildStepDownloadToolchains(toolchains):
  """Run NaCl's download_toolchains.py to fetch the required toolchains."""
  buildbot_common.BuildStep('Running download_toolchains.py')
  download_script = os.path.join('build', 'download_toolchains.py')
  args = [sys.executable, download_script, '--no-arm-trusted',
          '--arm-untrusted', '--keep']
  # The bionic toolchain must be requested explicitly.
  if 'bionic' in toolchains:
    args.append('--allow-bionic')
  buildbot_common.Run(args, cwd=NACL_DIR)
def BuildStepCleanPepperDirs(pepperdir, pepperdir_old):
  """Remove the previous SDK output directories and recreate an empty one."""
  buildbot_common.BuildStep('Clean Pepper Dirs')
  buildbot_common.RemoveDir(pepperdir_old)
  buildbot_common.RemoveDir(pepperdir)
  buildbot_common.MakeDir(pepperdir)
def BuildStepMakePepperDirs(pepperdir, subdirs):
  """Create each named subdirectory under the SDK output directory."""
  for subdir in subdirs:
    buildbot_common.MakeDir(os.path.join(pepperdir, subdir))
# Documentation and license files copied verbatim into the root of the SDK.
TEXT_FILES = [
  'AUTHORS',
  'COPYING',
  'LICENSE',
  'README.Makefiles',
  'getting_started/README',
]
def BuildStepCopyTextFiles(pepperdir, pepper_ver, chrome_revision,
                           nacl_revision):
  """Copy top-level text files into the SDK and expand README placeholders.

  Args:
    pepperdir: Root of the SDK being assembled.
    pepper_ver: Version string substituted for ${VERSION}.
    chrome_revision: Revision string substituted for ${CHROME_REVISION}.
    nacl_revision: Revision string substituted for ${NACL_REVISION}.
  """
  buildbot_common.BuildStep('Add Text Files')
  InstallFiles(SDK_SRC_DIR, pepperdir, TEXT_FILES)

  # Replace a few placeholders in README. Use context managers so the file
  # handles are closed deterministically (the previous code leaked them).
  with open(os.path.join(SDK_SRC_DIR, 'README')) as readme_in:
    readme_text = readme_in.read()
  readme_text = readme_text.replace('${VERSION}', pepper_ver)
  readme_text = readme_text.replace('${CHROME_REVISION}', chrome_revision)
  readme_text = readme_text.replace('${NACL_REVISION}', nacl_revision)

  # Year/Month/Day Hour:Minute:Second
  time_format = '%Y/%m/%d %H:%M:%S'
  readme_text = readme_text.replace('${DATE}',
      datetime.datetime.now().strftime(time_format))

  with open(os.path.join(pepperdir, 'README'), 'w') as readme_out:
    readme_out.write(readme_text)
def BuildStepUntarToolchains(pepperdir, toolchains):
  """Unpack each requested toolchain into pepperdir/toolchain/<name>."""
  buildbot_common.BuildStep('Untar Toolchains')
  platform = getos.GetPlatform()
  # Toolchains are extracted into a scratch directory, then moved into place.
  tmpdir = os.path.join(OUT_DIR, 'tc_temp')
  buildbot_common.RemoveDir(tmpdir)
  buildbot_common.MakeDir(tmpdir)
  if 'newlib' in toolchains:
    # Untar the newlib toolchains
    tarfile = GetNewlibToolchain()
    buildbot_common.Run([sys.executable, CYGTAR, '-C', tmpdir, '-xf', tarfile],
                        cwd=NACL_DIR)
    # Then rename/move it to the pepper toolchain directory
    srcdir = os.path.join(tmpdir, 'sdk', 'nacl-sdk')
    tcname = platform + '_x86_newlib'
    newlibdir = os.path.join(pepperdir, 'toolchain', tcname)
    buildbot_common.Move(srcdir, newlibdir)
  if 'bionic' in toolchains:
    # Untar the bionic toolchains
    tarfile = GetBionicToolchain()
    tcname = platform + '_arm_bionic'
    buildbot_common.Run([sys.executable, CYGTAR, '-C', tmpdir, '-xf', tarfile],
                        cwd=NACL_DIR)
    srcdir = os.path.join(tmpdir, tcname)
    bionicdir = os.path.join(pepperdir, 'toolchain', tcname)
    buildbot_common.Move(srcdir, bionicdir)
  if 'arm' in toolchains:
    # Copy the existing arm toolchain from native_client tree
    tcname = platform + '_arm_newlib'
    arm_toolchain = os.path.join(NACL_DIR, 'toolchain', tcname)
    arm_toolchain_sdk = os.path.join(pepperdir, 'toolchain',
                                     os.path.basename(arm_toolchain))
    buildbot_common.CopyDir(arm_toolchain, arm_toolchain_sdk)
  if 'glibc' in toolchains:
    # Untar the glibc toolchains
    tarfile = GetGlibcToolchain()
    tcname = platform + '_x86_glibc'
    buildbot_common.Run([sys.executable, CYGTAR, '-C', tmpdir, '-xf', tarfile],
                        cwd=NACL_DIR)
    # Then rename/move it to the pepper toolchain directory
    srcdir = os.path.join(tmpdir, 'toolchain', platform + '_x86')
    glibcdir = os.path.join(pepperdir, 'toolchain', tcname)
    buildbot_common.Move(srcdir, glibcdir)
  # Untar the pnacl toolchains
  if 'pnacl' in toolchains:
    tmpdir = os.path.join(tmpdir, 'pnacl')
    buildbot_common.RemoveDir(tmpdir)
    buildbot_common.MakeDir(tmpdir)
    tarfile = GetPNaClToolchain()
    tcname = platform + '_pnacl'
    buildbot_common.Run([sys.executable, CYGTAR, '-C', tmpdir, '-xf', tarfile],
                        cwd=NACL_DIR)
    # Then rename/move it to the pepper toolchain directory
    pnacldir = os.path.join(pepperdir, 'toolchain', tcname)
    buildbot_common.Move(tmpdir, pnacldir)
  buildbot_common.RemoveDir(tmpdir)
  # NOTE(review): `options` is the module-global option namespace; `tcname`
  # here is whatever the last enabled branch above set it to — presumably
  # intentional, but worth confirming.
  if options.gyp and platform != 'win':
    # If the gyp options is specified we install a toolchain
    # wrapper so that gyp can switch toolchains via a commandline
    # option.
    bindir = os.path.join(pepperdir, 'toolchain', tcname, 'bin')
    wrapper = os.path.join(SDK_SRC_DIR, 'tools', 'compiler-wrapper.py')
    buildbot_common.MakeDir(bindir)
    buildbot_common.CopyFile(wrapper, bindir)
    # Module 'os' has no 'symlink' member (on Windows).
    # pylint: disable=E1101
    os.symlink('compiler-wrapper.py', os.path.join(bindir, 'i686-nacl-g++'))
    os.symlink('compiler-wrapper.py', os.path.join(bindir, 'i686-nacl-gcc'))
    os.symlink('compiler-wrapper.py', os.path.join(bindir, 'i686-nacl-ar'))
# List of toolchain headers to install, keyed by toolchain name.
# Source is relative to top of Chromium tree, destination is relative
# to the toolchain header directory (a trailing '/' keeps the basename).
NACL_HEADER_MAP = {
  'newlib': [
      ('native_client/src/include/nacl/nacl_exception.h', 'nacl/'),
      ('native_client/src/include/nacl/nacl_minidump.h', 'nacl/'),
      ('native_client/src/untrusted/irt/irt.h', ''),
      ('native_client/src/untrusted/irt/irt_dev.h', ''),
      ('native_client/src/untrusted/nacl/nacl_dyncode.h', 'nacl/'),
      ('native_client/src/untrusted/nacl/nacl_startup.h', 'nacl/'),
      ('native_client/src/untrusted/nacl/nacl_thread.h', 'nacl/'),
      ('native_client/src/untrusted/pthread/pthread.h', ''),
      ('native_client/src/untrusted/pthread/semaphore.h', ''),
      ('native_client/src/untrusted/valgrind/dynamic_annotations.h', 'nacl/'),
      ('ppapi/nacl_irt/irt_ppapi.h', ''),
  ],
  'glibc': [
      ('native_client/src/include/nacl/nacl_exception.h', 'nacl/'),
      ('native_client/src/include/nacl/nacl_minidump.h', 'nacl/'),
      ('native_client/src/untrusted/irt/irt.h', ''),
      ('native_client/src/untrusted/irt/irt_dev.h', ''),
      ('native_client/src/untrusted/nacl/nacl_dyncode.h', 'nacl/'),
      ('native_client/src/untrusted/nacl/nacl_startup.h', 'nacl/'),
      ('native_client/src/untrusted/nacl/nacl_thread.h', 'nacl/'),
      ('native_client/src/untrusted/valgrind/dynamic_annotations.h', 'nacl/'),
      ('ppapi/nacl_irt/irt_ppapi.h', ''),
  ],
  'host': []
}
def InstallFiles(src_root, dest_root, file_list):
  """Copy a set of files from src_root to dest_root according
  to the given mapping. This allows files to be copied to a location
  in the destination tree that is different to the location in the
  source tree.

  If the destination mapping ends with a '/' then the destination
  basename is inherited from the source file.

  Wildcards can be used in the source list but it is not recommended
  as this can end up adding things to the SDK unintentionally.

  Args:
    src_root: Directory the source entries are resolved against.
    dest_root: Directory the destination entries are resolved against.
    file_list: List of either plain path strings (copied to the same
        relative path) or (source, destination) pairs.
  """
  for file_spec in file_list:
    # The list of files to install can be a simple list of
    # strings or a list of pairs, where each pair corresponds
    # to a mapping from source to destination names.
    # isinstance (rather than `type(...) == str`) also accepts str subclasses.
    if isinstance(file_spec, str):
      src_file = dest_file = file_spec
    else:
      src_file, dest_file = file_spec

    src_file = os.path.join(src_root, src_file)

    # Expand sources files using glob.
    sources = glob.glob(src_file)
    if not sources:
      sources = [src_file]

    if len(sources) > 1 and not dest_file.endswith('/'):
      buildbot_common.ErrorExit("Target file must end in '/' when "
                                "using globbing to install multiple files")

    for source in sources:
      if dest_file.endswith('/'):
        dest = os.path.join(dest_file, os.path.basename(source))
      else:
        dest = dest_file
      dest = os.path.join(dest_root, dest)
      if not os.path.isdir(os.path.dirname(dest)):
        buildbot_common.MakeDir(os.path.dirname(dest))

      buildbot_common.CopyFile(source, dest)
def InstallNaClHeaders(tc_dst_inc, tc_name):
  """Copies NaCl headers to expected locations in the toolchain."""
  # The arm toolchain installs the same headers as x86 newlib.
  effective_name = 'newlib' if tc_name == 'arm' else tc_name
  InstallFiles(SRC_DIR, tc_dst_inc, NACL_HEADER_MAP[effective_name])
def MakeNinjaRelPath(path):
  """Return `path` joined onto the output directory expressed relative to SRC_DIR."""
  rel_out = os.path.relpath(OUT_DIR, SRC_DIR)
  return os.path.join(rel_out, path)
# Libraries installed into each toolchain's NaCl library directory,
# keyed by toolchain name.
TOOLCHAIN_LIBS = {
  'newlib' : [
    'crti.o',
    'crtn.o',
    'libminidump_generator.a',
    'libnacl.a',
    'libnacl_dyncode.a',
    'libnacl_exception.a',
    'libnacl_list_mappings.a',
    'libnosys.a',
    'libppapi.a',
    'libppapi_stub.a',
    'libpthread.a',
  ],
  'glibc': [
    'libminidump_generator.a',
    'libminidump_generator.so',
    'libnacl.a',
    'libnacl_dyncode.a',
    'libnacl_dyncode.so',
    'libnacl_exception.a',
    'libnacl_exception.so',
    'libnacl_list_mappings.a',
    'libnacl_list_mappings.so',
    'libppapi.a',
    'libppapi.so',
    'libppapi_stub.a',
  ],
  'pnacl': [
    'libminidump_generator.a',
    'libnacl.a',
    'libnacl_dyncode.a',
    'libnacl_exception.a',
    'libnacl_list_mappings.a',
    'libnosys.a',
    'libppapi.a',
    'libppapi_stub.a',
    'libpthread.a',
  ]
}
def GypNinjaInstall(pepperdir, toolchains):
  """Copy gyp/ninja-built tools and toolchain libraries into the SDK tree."""
  build_dir = GYPBUILD_DIR
  ninja_out_dir = os.path.join(OUT_DIR, build_dir, 'Release')
  # Pairs of [name in the ninja out dir, name installed under sdk/tools].
  tools_files = [
    ['sel_ldr', 'sel_ldr_x86_32'],
    ['ncval_new', 'ncval'],
    ['irt_core_newlib_x32.nexe', 'irt_core_x86_32.nexe'],
    ['irt_core_newlib_x64.nexe', 'irt_core_x86_64.nexe'],
  ]
  platform = getos.GetPlatform()
  # TODO(binji): dump_syms doesn't currently build on Windows. See
  # http://crbug.com/245456
  if platform != 'win':
    tools_files += [
      ['dump_syms', 'dump_syms'],
      ['minidump_dump', 'minidump_dump'],
      ['minidump_stackwalk', 'minidump_stackwalk']
    ]
    tools_files.append(['sel_ldr64', 'sel_ldr_x86_64'])
  if platform == 'linux':
    tools_files.append(['nacl_helper_bootstrap',
                        'nacl_helper_bootstrap_x86_32'])
    tools_files.append(['nacl_helper_bootstrap64',
                        'nacl_helper_bootstrap_x86_64'])
  buildbot_common.MakeDir(os.path.join(pepperdir, 'tools'))
  # Add .exe extensions to all windows tools
  for pair in tools_files:
    if platform == 'win' and not pair[0].endswith('.nexe'):
      pair[0] += '.exe'
      pair[1] += '.exe'
  InstallFiles(ninja_out_dir, os.path.join(pepperdir, 'tools'), tools_files)
  # Install the built toolchain libraries for each requested toolchain.
  for tc in set(toolchains) & set(['newlib', 'glibc', 'pnacl']):
    if tc == 'pnacl':
      xarches = (None,)
    else:
      xarches = ('arm', '32', '64')
    for xarch in xarches:
      # There is no ARM glibc toolchain.
      if tc == 'glibc' and xarch == 'arm':
        continue
      src_dir = GetGypBuiltLib(tc, xarch)
      dst_dir = GetOutputToolchainLib(pepperdir, tc, xarch)
      InstallFiles(src_dir, dst_dir, TOOLCHAIN_LIBS[tc])
      if tc != 'pnacl':
        src_dir = GetGypToolchainLib(tc, xarch)
        InstallFiles(src_dir, dst_dir, ['crt1.o'])
def GypNinjaBuild_NaCl(rel_out_dir):
  """Build the core NaCl SDK targets (nacl_core_sdk, ncval, sel_ldr64).

  Args:
    rel_out_dir: Output directory, relative to the ninja build root; the
        arm and 64-bit variants get '-arm' / '-64' suffixed directories.
  """
  gyp_py = os.path.join(NACL_DIR, 'build', 'gyp_nacl')
  nacl_core_sdk_gyp = os.path.join(NACL_DIR, 'build', 'nacl_core_sdk.gyp')
  all_gyp = os.path.join(NACL_DIR, 'build', 'all.gyp')
  out_dir = MakeNinjaRelPath(rel_out_dir)
  out_dir_arm = MakeNinjaRelPath(rel_out_dir + '-arm')
  GypNinjaBuild('ia32', gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk', out_dir)
  GypNinjaBuild('arm', gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk', out_dir_arm)
  GypNinjaBuild('ia32', gyp_py, all_gyp, 'ncval_new', out_dir)
  platform = getos.GetPlatform()
  if platform == 'win':
    # Windows builds sel_ldr64 as its own ninja target in the main out dir.
    NinjaBuild('sel_ldr64', out_dir)
  else:
    out_dir_64 = MakeNinjaRelPath(rel_out_dir + '-64')
    GypNinjaBuild('x64', gyp_py, nacl_core_sdk_gyp, 'sel_ldr', out_dir_64)
    # We only need sel_ldr from the 64-bit out directory.
    # sel_ldr needs to be renamed, so we'll call it sel_ldr64.
    files_to_copy = [('sel_ldr', 'sel_ldr64')]
    if platform == 'linux':
      files_to_copy.append(('nacl_helper_bootstrap', 'nacl_helper_bootstrap64'))
    for src, dst in files_to_copy:
      buildbot_common.CopyFile(
          os.path.join(SRC_DIR, out_dir_64, 'Release', src),
          os.path.join(SRC_DIR, out_dir, 'Release', dst))
def GypNinjaBuild_Breakpad(rel_out_dir):
  """Build the breakpad host tools (dump_syms, minidump_*) with gyp/ninja."""
  # TODO(binji): dump_syms doesn't currently build on Windows. See
  # http://crbug.com/245456
  if getos.GetPlatform() == 'win':
    return
  targets = ['dump_syms', 'minidump_dump', 'minidump_stackwalk']
  GypNinjaBuild(
      'ia32',
      os.path.join(SRC_DIR, 'build', 'gyp_chromium'),
      os.path.join(SRC_DIR, 'breakpad', 'breakpad.gyp'),
      targets,
      MakeNinjaRelPath(rel_out_dir))
def GypNinjaBuild_PPAPI(arch, rel_out_dir):
  """Build the NaCl ppapi_lib target for the given arch into rel_out_dir."""
  gyp_script = os.path.join(SRC_DIR, 'build', 'gyp_chromium')
  native_client_gyp = os.path.join(
      SRC_DIR, 'ppapi', 'native_client', 'native_client.gyp')
  GypNinjaBuild(arch, gyp_script, native_client_gyp, 'ppapi_lib',
                MakeNinjaRelPath(rel_out_dir))
def GypNinjaBuild_Pnacl(rel_out_dir, target_arch):
  """Build the AOT pnacl_irt_shim for target_arch with gyp/ninja."""
  # TODO(binji): This will build the pnacl_irt_shim twice; once as part of the
  # Chromium build, and once here. When we move more of the SDK build process
  # to gyp, we can remove this.
  shim_gyp = os.path.join(SRC_DIR, 'ppapi', 'native_client', 'src',
                          'untrusted', 'pnacl_irt_shim', 'pnacl_irt_shim.gyp')
  GypNinjaBuild(target_arch,
                os.path.join(SRC_DIR, 'build', 'gyp_chromium'),
                shim_gyp,
                ['pnacl_irt_shim_aot'],
                MakeNinjaRelPath(rel_out_dir),
                False)
def GypNinjaBuild(arch, gyp_py_script, gyp_file, targets,
                  out_dir, force_arm_gcc=True):
  """Run gyp (with a ninja generator) on gyp_file, then ninja the targets.

  Args:
    arch: Target architecture passed as target_arch (None/'' to omit).
    gyp_py_script: Path to the gyp driver script (gyp_nacl / gyp_chromium).
    gyp_file: The .gyp file to generate from.
    targets: Target name or list of names, forwarded to NinjaBuild.
    out_dir: ninja output directory (passed via -G output_dir).
    force_arm_gcc: When building for arm, also set nacl_enable_arm_gcc=1.
  """
  gyp_env = dict(os.environ)
  gyp_env['GYP_GENERATORS'] = 'ninja'
  gyp_defines = []
  if options.mac_sdk:
    gyp_defines.append('mac_sdk=%s' % options.mac_sdk)
  if arch:
    gyp_defines.append('target_arch=%s' % arch)
    if arch == 'arm':
      if getos.GetPlatform() == 'linux':
        # Cross-compile with the hard-float arm toolchain; host tools still
        # use the native compiler.
        gyp_env['CC'] = 'arm-linux-gnueabihf-gcc'
        gyp_env['CXX'] = 'arm-linux-gnueabihf-g++'
        gyp_env['AR'] = 'arm-linux-gnueabihf-ar'
        gyp_env['AS'] = 'arm-linux-gnueabihf-as'
        gyp_env['CC_host'] = 'cc'
        gyp_env['CXX_host'] = 'c++'
      gyp_defines += ['armv7=1', 'arm_thumb=0', 'arm_neon=1',
                      'arm_float_abi=hard']
      if force_arm_gcc:
        gyp_defines.append('nacl_enable_arm_gcc=1')
  if getos.GetPlatform() == 'mac':
    gyp_defines.append('clang=1')

  gyp_env['GYP_DEFINES'] = ' '.join(gyp_defines)
  # Echo the effective gyp configuration for buildbot log readability.
  for key in ['GYP_GENERATORS', 'GYP_DEFINES', 'CC']:
    value = gyp_env.get(key)
    if value is not None:
      print '%s="%s"' % (key, value)
  gyp_generator_flags = ['-G', 'output_dir=%s' % (out_dir,)]
  gyp_depth = '--depth=.'
  buildbot_common.Run(
      [sys.executable, gyp_py_script, gyp_file, gyp_depth] + \
      gyp_generator_flags,
      cwd=SRC_DIR,
      env=gyp_env)
  NinjaBuild(targets, out_dir)
def NinjaBuild(targets, out_dir):
  """Run ninja in <out_dir>/Release for the given target(s).

  Args:
    targets: A single target name or a list of target names.
    out_dir: gyp/ninja output directory containing the Release subdir.
  """
  # isinstance instead of `type(targets) is not list`: the exact-type check
  # would wrap a list subclass into a nested list and break the command line.
  if not isinstance(targets, list):
    targets = [targets]
  out_config_dir = os.path.join(out_dir, 'Release')
  buildbot_common.Run(['ninja', '-C', out_config_dir] + targets, cwd=SRC_DIR)
def BuildStepBuildToolchains(pepperdir, toolchains):
  """Build and install all gyp-built SDK pieces for the chosen toolchains.

  Builds the core NaCl targets and breakpad tools, builds ppapi_lib per
  architecture, installs binaries/libraries, installs NaCl headers, and
  (for pnacl) copies the freshly built pnacl_irt_shim libraries.
  """
  buildbot_common.BuildStep('SDK Items')

  GypNinjaBuild_NaCl(GYPBUILD_DIR)
  GypNinjaBuild_Breakpad(GYPBUILD_DIR)

  platform = getos.GetPlatform()
  newlibdir = os.path.join(pepperdir, 'toolchain', platform + '_x86_newlib')
  glibcdir = os.path.join(pepperdir, 'toolchain', platform + '_x86_glibc')
  armdir = os.path.join(pepperdir, 'toolchain', platform + '_arm_newlib')
  pnacldir = os.path.join(pepperdir, 'toolchain', platform + '_pnacl')

  if set(toolchains) & set(['glibc', 'newlib']):
    GypNinjaBuild_PPAPI('ia32', GYPBUILD_DIR)

  if 'arm' in toolchains:
    GypNinjaBuild_PPAPI('arm', GYPBUILD_DIR + '-arm')

  GypNinjaInstall(pepperdir, toolchains)

  if 'newlib' in toolchains:
    InstallNaClHeaders(GetToolchainNaClInclude('newlib', newlibdir, 'x86'),
                       'newlib')

  if 'glibc' in toolchains:
    InstallNaClHeaders(GetToolchainNaClInclude('glibc', glibcdir, 'x86'),
                       'glibc')

  if 'arm' in toolchains:
    # arm uses the newlib headers (see also the arm toolchain dir name).
    InstallNaClHeaders(GetToolchainNaClInclude('newlib', armdir, 'arm'),
                       'arm')

  if 'pnacl' in toolchains:
    # NOTE: For ia32, gyp builds both x86-32 and x86-64 by default.
    for arch in ('ia32', 'arm'):
      # Fill in the latest native pnacl shim library from the chrome build.
      build_dir = GYPBUILD_DIR + '-pnacl-' + arch
      GypNinjaBuild_Pnacl(build_dir, arch)
      if arch == 'ia32':
        nacl_arches = ['x86-32', 'x86-64']
      elif arch == 'arm':
        nacl_arches = ['arm']
      else:
        buildbot_common.ErrorExit('Unknown architecture: %s' % arch)
      for nacl_arch in nacl_arches:
        release_build_dir = os.path.join(OUT_DIR, build_dir, 'Release',
                                         'gen', 'tc_pnacl_translate',
                                         'lib-' + nacl_arch)

        buildbot_common.CopyFile(
            os.path.join(release_build_dir, 'libpnacl_irt_shim.a'),
            GetPNaClNativeLib(pnacldir, nacl_arch))

    InstallNaClHeaders(GetToolchainNaClInclude('pnacl', pnacldir, 'x86'),
                       'newlib')
def MakeDirectoryOrClobber(pepperdir, dirname, clobber):
  """Ensure <pepperdir>/<dirname> exists, optionally wiping it first.

  Returns:
    The path of the (possibly recreated) directory.
  """
  target = os.path.join(pepperdir, dirname)
  if clobber:
    buildbot_common.RemoveDir(target)
  buildbot_common.MakeDir(target)
  return target
def BuildStepUpdateHelpers(pepperdir, clobber):
  """Regenerate the SDK project helper files via build_projects."""
  buildbot_common.BuildStep('Update project helpers')
  build_projects.UpdateHelpers(pepperdir, clobber=clobber)
def BuildStepUpdateUserProjects(pepperdir, toolchains,
                                build_experimental, clobber):
  """Regenerate the user-facing examples and libraries in the SDK.

  Args:
    pepperdir: Root of the pepper_NN output directory.
    toolchains: Toolchain names; 'arm' is dropped and 'host' is replaced
        by the current platform name before filtering projects.
    build_experimental: When False, EXPERIMENTAL projects are filtered out.
    clobber: Forwarded to build_projects.UpdateProjects.
  """
  buildbot_common.BuildStep('Update examples and libraries')

  filters = {}
  if not build_experimental:
    filters['EXPERIMENTAL'] = False
  if toolchains:
    # Work on a copy so the caller's list is not mutated.
    toolchains = toolchains[:]
    # arm isn't a valid toolchain for build_projects
    if 'arm' in toolchains:
      toolchains.remove('arm')
    if 'host' in toolchains:
      toolchains.remove('host')
      toolchains.append(getos.GetPlatform())
    filters['TOOLS'] = toolchains

  # Update examples and libraries
  filters['DEST'] = [
    'getting_started',
    'examples/api',
    'examples/demo',
    'examples/tutorial',
    'src'
  ]

  tree = parse_dsc.LoadProjectTree(SDK_SRC_DIR, include=filters)
  build_projects.UpdateProjects(pepperdir, tree, clobber=clobber,
                                toolchains=toolchains)
def BuildStepMakeAll(pepperdir, directory, step_name,
                     deps=True, clean=False, config='Debug', args=None):
  """Build every project under <pepperdir>/<directory> as one build step."""
  buildbot_common.BuildStep(step_name)
  build_projects.BuildProjectsBranch(pepperdir, directory, clean,
                                     deps, config, args)
def BuildStepBuildLibraries(pepperdir, directory):
  """Build the SDK libraries in both Debug and Release configurations."""
  BuildStepMakeAll(pepperdir, directory, 'Build Libraries Debug',
                   clean=True, config='Debug')
  BuildStepMakeAll(pepperdir, directory, 'Build Libraries Release',
                   clean=True, config='Release')

  # Cleanup .pyc file generated while building libraries. Without
  # this we would end up shipping the pyc in the SDK tarball.
  buildbot_common.RemoveFile(os.path.join(pepperdir, 'tools', '*.pyc'))
def GenerateNotice(fileroot, output_filename='NOTICE', extra_files=None):
  """Collect LICENSE-like files under fileroot into one NOTICE file.

  Args:
    fileroot: Directory tree to scan for license files.
    output_filename: Output file; relative paths are rooted at fileroot.
    extra_files: Optional extra paths (relative to fileroot) to include for
        packages that ship no standalone license file.
  """
  # Look for LICENSE files
  license_filenames_re = re.compile('LICENSE|COPYING|COPYRIGHT')

  license_files = []
  for root, _, files in os.walk(fileroot):
    for filename in files:
      if license_filenames_re.match(filename):
        path = os.path.join(root, filename)
        license_files.append(path)

  if extra_files:
    license_files += [os.path.join(fileroot, f) for f in extra_files]
  # Echo the collected list into the build log.
  print '\n'.join(license_files)

  if not os.path.isabs(output_filename):
    output_filename = os.path.join(fileroot, output_filename)
  generate_notice.Generate(output_filename, fileroot, license_files)
def BuildStepVerifyFilelist(pepperdir):
  """Verify the assembled SDK against the checked-in sdk_files.list."""
  buildbot_common.BuildStep('Verify SDK Files')
  file_list_path = os.path.join(SCRIPT_DIR, 'sdk_files.list')
  try:
    verify_filelist.Verify(file_list_path, pepperdir)
    print 'OK'
  except verify_filelist.ParseException, e:
    buildbot_common.ErrorExit('Parsing sdk_files.list failed:\n\n%s' % e)
  except verify_filelist.VerifyException, e:
    # Build an actionable message telling the developer how to reproduce
    # the check locally.
    file_list_rel = os.path.relpath(file_list_path)
    verify_filelist_py = os.path.splitext(verify_filelist.__file__)[0] + '.py'
    verify_filelist_py = os.path.relpath(verify_filelist_py)
    pepperdir_rel = os.path.relpath(pepperdir)
    msg = """\
SDK verification failed:
%s
Add/remove files from %s to fix.
Run:
./%s %s %s
to test.""" % (e, file_list_rel, verify_filelist_py, file_list_rel,
               pepperdir_rel)
    buildbot_common.ErrorExit(msg)
def BuildStepTarBundle(pepper_ver, tarfile):
  """Create the bzip2-compressed pepper_<ver> SDK tarball via cygtar."""
  buildbot_common.BuildStep('Tar Pepper Bundle')
  buildbot_common.MakeDir(os.path.dirname(tarfile))
  tar_cmd = [sys.executable, CYGTAR, '-C', OUT_DIR, '-cjf', tarfile,
             'pepper_' + pepper_ver]
  buildbot_common.Run(tar_cmd, cwd=NACL_DIR)
def GetManifestBundle(pepper_ver, chrome_revision, nacl_revision, tarfile,
                      archive_url):
  """Build a manifest_util.Bundle describing the given SDK tarball.

  Hashes and sizes the tarball, then fills in the bundle metadata
  (revision, version, description, stability) for the manifest snippet.
  """
  with open(tarfile, 'rb') as tarfile_stream:
    archive_sha1, archive_size = manifest_util.DownloadAndComputeHash(
        tarfile_stream)

  sdk_archive = manifest_util.Archive(manifest_util.GetHostOS())
  sdk_archive.url = archive_url
  sdk_archive.checksum = archive_sha1
  sdk_archive.size = archive_size

  sdk_bundle = manifest_util.Bundle('pepper_' + pepper_ver)
  sdk_bundle.repath = 'pepper_' + pepper_ver
  sdk_bundle.revision = int(chrome_revision)
  sdk_bundle.version = int(pepper_ver)
  sdk_bundle.description = (
      'Chrome %s bundle. Chrome revision: %s. NaCl revision: %s' % (
          pepper_ver, chrome_revision, nacl_revision))
  sdk_bundle.stability = 'dev'
  sdk_bundle.recommended = 'no'
  sdk_bundle.archives = [sdk_archive]
  return sdk_bundle
def BuildStepArchiveBundle(name, pepper_ver, chrome_revision, nacl_revision,
                           tarfile):
  """Upload a tarball and its manifest snippet to the mirror bucket."""
  buildbot_common.BuildStep('Archive %s' % name)
  bucket_path = 'nativeclient-mirror/nacl/nacl_sdk/%s' % (
      build_version.ChromeVersion(),)
  tarname = os.path.basename(tarfile)
  tarfile_dir = os.path.dirname(tarfile)
  buildbot_common.Archive(tarname, bucket_path, tarfile_dir)

  # generate "manifest snippet" for this archive.
  archive_url = GSTORE + 'nacl_sdk/%s/%s' % (
      build_version.ChromeVersion(), tarname)

  bundle = GetManifestBundle(pepper_ver, chrome_revision, nacl_revision,
                             tarfile, archive_url)

  manifest_snippet_file = os.path.join(OUT_DIR, tarname + '.json')
  with open(manifest_snippet_file, 'wb') as manifest_snippet_stream:
    manifest_snippet_stream.write(bundle.GetDataAsString())

  buildbot_common.Archive(tarname + '.json', bucket_path, OUT_DIR,
                          step_link=False)
def BuildStepArchiveSDKTools():
  """Build and upload the SDK updater artifacts (linux-sdk-multi bot only)."""
  # Only push up sdk_tools.tgz and nacl_sdk.zip on the linux buildbot.
  if os.getenv('BUILDBOT_BUILDERNAME', '') != 'linux-sdk-multi':
    return

  buildbot_common.BuildStep('Build SDK Tools')
  build_updater.BuildUpdater(OUT_DIR)

  buildbot_common.BuildStep('Archive SDK Tools')
  bucket_path = 'nativeclient-mirror/nacl/nacl_sdk/%s' % (
      build_version.ChromeVersion(),)
  for artifact in ('sdk_tools.tgz', 'nacl_sdk.zip'):
    buildbot_common.Archive(artifact, bucket_path, OUT_DIR, step_link=False)
def BuildStepSyncNaClPorts():
  """Pull the pinned revision of naclports from SVN."""
  buildbot_common.BuildStep('Sync naclports')
  pinned_rev = str(NACLPORTS_REV)
  if os.path.exists(NACLPORTS_DIR):
    # An existing checkout: just sync it to the pinned revision.
    cmd = ['svn', 'update', '-r', pinned_rev]
    buildbot_common.Run(cmd, cwd=NACLPORTS_DIR)
  else:
    # Fresh checkout into <parent>/naclports.
    cmd = ['svn', 'checkout', '-q', '-r', pinned_rev, NACLPORTS_URL,
           'naclports']
    buildbot_common.Run(cmd, cwd=os.path.dirname(NACLPORTS_DIR))
def BuildStepBuildNaClPorts(pepper_ver, pepperdir):
  """Build selected naclports in all configurations."""
  # TODO(sbc): currently naclports doesn't know anything about
  # Debug builds so the Debug subfolders are all empty.
  env = dict(os.environ)
  env['NACL_SDK_ROOT'] = pepperdir
  env['PEPPER_DIR'] = os.path.basename(pepperdir)  # pepper_NN
  env['NACLPORTS_NO_ANNOTATE'] = "1"
  env['NACLPORTS_NO_UPLOAD'] = "1"

  build_script = 'build_tools/naclports-linux-sdk-bundle.sh'
  buildbot_common.BuildStep('Build naclports')

  bundle_dir = os.path.join(NACLPORTS_DIR, 'out', 'sdk_bundle')
  out_dir = os.path.join(bundle_dir, 'pepper_%s' % pepper_ver)

  # Remove the sdk_bundle directory to remove stale files from previous builds.
  buildbot_common.RemoveDir(bundle_dir)

  buildbot_common.Run([build_script], env=env, cwd=NACLPORTS_DIR)

  # Some naclports do not include a standalone LICENSE/COPYING file
  # so we explicitly list those here for inclusion.
  extra_licenses = ('tinyxml/readme.txt',
                    'jpeg-8d/README',
                    'zlib-1.2.3/README')
  src_root = os.path.join(NACLPORTS_DIR, 'out', 'repository-i686')
  output_license = os.path.join(out_dir, 'ports', 'LICENSE')
  GenerateNotice(src_root, output_license, extra_licenses)
  # Ship the naclports README alongside the built ports.
  readme = os.path.join(out_dir, 'ports', 'README')
  oshelpers.Copy(['-v', os.path.join(SDK_SRC_DIR, 'README.naclports'), readme])
def BuildStepTarNaClPorts(pepper_ver, tarfile):
  """Create tar archive containing headers and libs from naclports build."""
  buildbot_common.BuildStep('Tar naclports Bundle')
  buildbot_common.MakeDir(os.path.dirname(tarfile))
  ports_subdir = os.path.join('pepper_%s' % pepper_ver, 'ports')
  sdk_bundle_out = os.path.join(NACLPORTS_DIR, 'out', 'sdk_bundle')
  tar_cmd = [sys.executable, CYGTAR, '-C', sdk_bundle_out, '-cjf', tarfile,
             ports_subdir]
  buildbot_common.Run(tar_cmd, cwd=NACL_DIR)
def BuildStepBuildAppEngine(pepperdir, chrome_revision):
  """Build the projects found in src/gonacl_appengine/src"""
  buildbot_common.BuildStep('Build GoNaCl AppEngine Projects')
  build_env = dict(os.environ)
  build_env['NACL_SDK_ROOT'] = pepperdir
  build_env['NACLPORTS_NO_ANNOTATE'] = "1"
  buildbot_common.Run(['make', 'upload', 'REVISION=%s' % chrome_revision],
                      env=build_env, cwd=GONACL_APPENGINE_SRC_DIR)
def main(args):
  """Parse command-line options and run the SDK build pipeline.

  Returns 0 on success; option errors exit via parser.error.
  """
  parser = optparse.OptionParser(description=__doc__)
  parser.add_option('--bionic', help='Add bionic build.',
                    action='store_true')
  parser.add_option('--tar', help='Force the tar step.',
                    action='store_true')
  parser.add_option('--archive', help='Force the archive step.',
                    action='store_true')
  parser.add_option('--gyp',
                    help='Use gyp to build examples/libraries/Makefiles.',
                    action='store_true')
  parser.add_option('--release', help='PPAPI release version.',
                    dest='release', default=None)
  parser.add_option('--build-ports',
                    help='Build naclport bundle.', action='store_true')
  parser.add_option('--build-app-engine',
                    help='Build AppEngine demos.', action='store_true')
  parser.add_option('--experimental',
                    help='build experimental examples and libraries',
                    action='store_true',
                    dest='build_experimental')
  parser.add_option('--skip-toolchain', help='Skip toolchain untar',
                    action='store_true')
  parser.add_option('--mac-sdk',
                    help='Set the mac-sdk (e.g. 10.6) to use when building with ninja.')

  # To setup bash completion for this command first install optcomplete
  # and then add this line to your .bashrc:
  #  complete -F _optcomplete build_sdk.py
  try:
    import optcomplete
    optcomplete.autocomplete(parser)
  except ImportError:
    pass

  # Options are stored in a module-level global so the build helpers
  # (e.g. GypNinjaBuild) can read them.
  global options
  options, args = parser.parse_args(args[1:])
  if args:
    parser.error("Unexpected arguments: %s" % str(args))

  generate_make.use_gyp = options.gyp
  # On the official SDK builders force every optional step on.
  if buildbot_common.IsSDKBuilder():
    options.archive = True
    options.build_ports = True
    options.build_app_engine = True
    options.tar = True

  toolchains = ['newlib', 'glibc', 'arm', 'pnacl', 'host']
  if options.bionic:
    toolchains.append('bionic')
  print 'Building: ' + ' '.join(toolchains)

  # Archiving requires a tarball to upload.
  if options.archive and not options.tar:
    parser.error('Incompatible arguments with archive.')

  chrome_version = int(build_version.ChromeMajorVersion())
  chrome_revision = build_version.ChromeRevision()
  nacl_revision = build_version.NaClRevision()
  pepper_ver = str(chrome_version)
  pepper_old = str(chrome_version - 1)
  pepperdir = os.path.join(OUT_DIR, 'pepper_' + pepper_ver)
  pepperdir_old = os.path.join(OUT_DIR, 'pepper_' + pepper_old)
  tarname = 'naclsdk_' + getos.GetPlatform() + '.tar.bz2'
  tarfile = os.path.join(OUT_DIR, tarname)

  if options.release:
    pepper_ver = options.release
  print 'Building PEPPER %s at %s' % (pepper_ver, chrome_revision)

  if 'NACL_SDK_ROOT' in os.environ:
    # We don't want the currently configured NACL_SDK_ROOT to have any effect
    # on the build.
    del os.environ['NACL_SDK_ROOT']

  if not options.skip_toolchain:
    BuildStepCleanPepperDirs(pepperdir, pepperdir_old)
    BuildStepMakePepperDirs(pepperdir, ['include', 'toolchain', 'tools'])
    BuildStepDownloadToolchains(toolchains)
    BuildStepUntarToolchains(pepperdir, toolchains)

  BuildStepBuildToolchains(pepperdir, toolchains)

  BuildStepUpdateHelpers(pepperdir, True)
  BuildStepUpdateUserProjects(pepperdir, toolchains,
                              options.build_experimental, True)

  BuildStepCopyTextFiles(pepperdir, pepper_ver, chrome_revision, nacl_revision)

  # Ship with libraries prebuilt, so run that first.
  BuildStepBuildLibraries(pepperdir, 'src')
  GenerateNotice(pepperdir)

  # Verify the SDK contains what we expect.
  BuildStepVerifyFilelist(pepperdir)

  if options.tar:
    BuildStepTarBundle(pepper_ver, tarfile)

  # naclports and AppEngine demos are built on linux only.
  if options.build_ports and getos.GetPlatform() == 'linux':
    ports_tarfile = os.path.join(OUT_DIR, 'naclports.tar.bz2')
    BuildStepSyncNaClPorts()
    BuildStepBuildNaClPorts(pepper_ver, pepperdir)
    if options.tar:
      BuildStepTarNaClPorts(pepper_ver, ports_tarfile)

  if options.build_app_engine and getos.GetPlatform() == 'linux':
    BuildStepBuildAppEngine(pepperdir, chrome_revision)

  # Archive on non-trybots.
  if options.archive:
    BuildStepArchiveBundle('build', pepper_ver, chrome_revision, nacl_revision,
                           tarfile)
    if options.build_ports and getos.GetPlatform() == 'linux':
      BuildStepArchiveBundle('naclports', pepper_ver, chrome_revision,
                             nacl_revision, ports_tarfile)
    BuildStepArchiveSDKTools()

  return 0
if __name__ == '__main__':
  try:
    sys.exit(main(sys.argv))
  except KeyboardInterrupt:
    # Convert Ctrl-C into a regular build error exit.
    buildbot_common.ErrorExit('build_sdk: interrupted')
| native_client_sdk/src/build_tools/build_sdk.py | 35,213 | !/usr/bin/env python Copyright (c) 2012 The Chromium Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. pylint: disable=W0621 std python includes local includes Add SDK make tools scripts to the python path. Replace a few placeholders in README Year/Month/Day Hour:Minute:Second Untar the newlib toolchains Then rename/move it to the pepper toolchain directory Untar the bionic toolchains Copy the existing arm toolchain from native_client tree Untar the glibc toolchains Then rename/move it to the pepper toolchain directory Untar the pnacl toolchains Then rename/move it to the pepper toolchain directory If the gyp options is specified we install a toolchain wrapper so that gyp can switch toolchains via a commandline option. Module 'os' has no 'symlink' member (on Windows). pylint: disable=E1101 List of toolchain headers to install. Source is relative to top of Chromium tree, destination is relative to the toolchain header directory. The list of files to install can be a simple list of strings or a list of pairs, where each pair corresponds to a mapping from source to destination names. Expand sources files using glob. arm toolchain header should be the same as the x86 newlib ones TODO(binji): dump_syms doesn't currently build on Windows. See http://crbug.com/245456 Add .exe extensions to all windows tools We only need sel_ldr from the 64-bit out directory. sel_ldr needs to be renamed, so we'll call it sel_ldr64. TODO(binji): dump_syms doesn't currently build on Windows. See http://crbug.com/245456 TODO(binji): This will build the pnacl_irt_shim twice; once as part of the Chromium build, and once here. When we move more of the SDK build process to gyp, we can remove this. NOTE: For ia32, gyp builds both x86-32 and x86-64 by default. Fill in the latest native pnacl shim library from the chrome build. 
arm isn't a valid toolchain for build_projects Update examples and libraries Cleanup .pyc file generated while building libraries. Without this we would end up shipping the pyc in the SDK tarball. Look for LICENSE files generate "manifest snippet" for this archive. Only push up sdk_tools.tgz and nacl_sdk.zip on the linux buildbot. checkout new copy of naclports sync existing copy to pinned revision. TODO(sbc): currently naclports doesn't know anything about Debug builds so the Debug subfolders are all empty. pepper_NN Remove the sdk_bundle directory to remove stale files from previous builds. Some naclports do not include a standalone LICENSE/COPYING file so we explicitly list those here for inclusion. To setup bash completion for this command first install optcomplete and then add this line to your .bashrc: complete -F _optcomplete build_sdk.py We don't want the currently configured NACL_SDK_ROOT to have any effect of the build. Ship with libraries prebuilt, so run that first. Verify the SDK contains what we expect. Archive on non-trybots. | 2,961 | en | 0.825541 |
# Copyright (c) 2016, 2020, Oracle and/or its affiliates.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0, as
# published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an
# additional permission to link the program and your derivative works
# with the separately licensed software that they have included with
# MySQL.
#
# Without limiting anything contained in the foregoing, this file,
# which is part of MySQL Connector/Python, is also subject to the
# Universal FOSS Exception, version 1.0, a copy of which can be found at
# http://oss.oracle.com/licenses/universal-foss-exception.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Implementation of MySQL Authentication Plugin."""
import hashlib
import struct
from .helpers import hexlify
def xor_string(hash1, hash2, hash_size):
    """Encrypt/Decrypt function used for password encryption in
    authentication, using a simple XOR.

    Args:
        hash1 (bytes): The first hash.
        hash2 (bytes): The second hash.
        hash_size (int): Number of bytes packed into the result; must equal
            the length of the shorter of the two hashes.

    Returns:
        bytes: The byte-wise XOR of the two hashes.
    """
    # zip() over bytes yields ints in Python 3, so ^ operates per byte;
    # extra bytes in the longer hash are ignored.
    xored = [h1 ^ h2 for (h1, h2) in zip(hash1, hash2)]
    return struct.pack("{0}B".format(hash_size), *xored)
class BaseAuthPlugin(object):
    """Base class for implementing the authentication plugins.

    Stores the credentials; subclasses must provide the plugin and
    authentication mechanism names.
    """

    def __init__(self, username=None, password=None):
        """Keep the credentials for use by the concrete plugin."""
        self._username = username
        self._password = password

    def auth_name(self):
        """Returns the authentication name.

        Returns:
            str: The authentication name.
        """
        raise NotImplementedError

    def name(self):
        """Returns the plugin name.

        Returns:
            str: The plugin name.
        """
        raise NotImplementedError
class MySQL41AuthPlugin(BaseAuthPlugin):
    """Class implementing the MySQL Native Password authentication plugin."""

    def name(self):
        """Returns the plugin name.

        Returns:
            str: The plugin name.
        """
        return "MySQL 4.1 Authentication Plugin"

    def auth_name(self):
        """Returns the authentication name.

        Returns:
            str: The authentication name.
        """
        return "MYSQL41"

    def auth_data(self, data):
        """Hashing for MySQL 4.1 authentication.

        Args:
            data (bytes): The authentication data (server nonce).

        Returns:
            str: The authentication response.
        """
        if not self._password:
            # No password configured: respond with just the user name.
            return "{0}\0{1}\0".format("", self._username)
        password = (self._password.encode("utf-8")
                    if isinstance(self._password, str) else self._password)
        first_hash = hashlib.sha1(password).digest()
        second_hash = hashlib.sha1(first_hash).digest()
        scramble = xor_string(
            first_hash, hashlib.sha1(data + second_hash).digest(), 20)
        return "{0}\0{1}\0*{2}\0".format("", self._username, hexlify(scramble))
class PlainAuthPlugin(BaseAuthPlugin):
    """Class implementing the MySQL Plain authentication plugin."""

    def name(self):
        """Returns the plugin name.

        Returns:
            str: The plugin name.
        """
        return "Plain Authentication Plugin"

    def auth_name(self):
        """Returns the authentication name.

        Returns:
            str: The authentication name.
        """
        return "PLAIN"

    def auth_data(self):
        """Returns the authentication data.

        Returns:
            str: NUL-delimited user name and password.
        """
        return "\0{0}\0{1}".format(self._username, self._password)
class Sha256MemoryAuthPlugin(BaseAuthPlugin):
    """Class implementing the SHA256_MEMORY authentication plugin."""

    def name(self):
        """Returns the plugin name.

        Returns:
            str: The plugin name.
        """
        return "SHA256_MEMORY Authentication Plugin"

    def auth_name(self):
        """Returns the authentication name.

        Returns:
            str: The authentication name.
        """
        return "SHA256_MEMORY"

    def auth_data(self, data):
        """Hashing for SHA256_MEMORY authentication.

        The scramble is of the form:
            SHA256(SHA256(SHA256(PASSWORD)),NONCE) XOR SHA256(PASSWORD)

        NOTE(review): unlike MYSQL41, a None password is not handled here —
        hashlib.sha256(None) raises TypeError; confirm callers always
        provide a password.

        Args:
            data (bytes): The authentication data (server nonce).

        Returns:
            str: The authentication response.
        """
        password = (self._password.encode("utf-8")
                    if isinstance(self._password, str) else self._password)
        pwd_hash = hashlib.sha256(password).digest()
        nonce_hash = hashlib.sha256(
            hashlib.sha256(pwd_hash).digest() + data).digest()
        scramble = xor_string(nonce_hash, pwd_hash, 32)
        return "\0{0}\0{1}".format(self._username, hexlify(scramble))
| backend/env/Lib/site-packages/mysqlx/authentication.py | 5,431 | Base class for implementing the authentication plugins.
Class implementing the MySQL Native Password authentication plugin.
Class implementing the MySQL Plain authentication plugin.
Class implementing the SHA256_MEMORY authentication plugin.
Hashing for MySQL 4.1 authentication.
Args:
data (str): The authentication data.
Returns:
str: The authentication response.
Returns the authentication data.
Returns:
str: The authentication data.
Hashing for SHA256_MEMORY authentication.
The scramble is of the form:
SHA256(SHA256(SHA256(PASSWORD)),NONCE) XOR SHA256(PASSWORD)
Args:
data (str): The authentication data.
Returns:
str: The authentication response.
Returns the authentication name.
Returns:
str: The authentication name.
Returns the authentication name.
Returns:
str: The authentication name.
Returns the authentication name.
Returns:
str: The authentication name.
Returns the authentication name.
Returns:
str: The authentication name.
Returns the plugin name.
Returns:
str: The plugin name.
Returns the plugin name.
Returns:
str: The plugin name.
Returns the plugin name.
Returns:
str: The plugin name.
Returns the plugin name.
Returns:
str: The plugin name.
Encrypt/Decrypt function used for password encryption in
authentication, using a simple XOR.
Args:
hash1 (str): The first hash.
hash2 (str): The second hash.
Returns:
str: A string with the xor applied.
Implementation of MySQL Authentication Plugin.
Copyright (c) 2016, 2020, Oracle and/or its affiliates. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License, version 2.0, as published by the Free Software Foundation. This program is also distributed with certain software (including but not limited to OpenSSL) that is licensed under separate terms, as designated in a particular file or component or in included license documentation. The authors of MySQL hereby grant you an additional permission to link the program and your derivative works with the separately licensed software that they have included with MySQL. Without limiting anything contained in the foregoing, this file, which is part of MySQL Connector/Python, is also subject to the Universal FOSS Exception, version 1.0, a copy of which can be found at http://oss.oracle.com/licenses/universal-foss-exception. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0, for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 2,847 | en | 0.778393 |
from os.path import realpath
def main(data=None):
    """Decode the puzzle input and print the answer without a newline.

    Args:
        data: Optional input string. When None (the default, preserving the
            original script behaviour) the sibling ``<name>.txt`` file is
            read.

    Returns:
        str: The decoded string that was printed (useful for testing).
    """
    if data is None:
        # Context manager closes the file deterministically; the original
        # leaked the handle from a bare open().read().
        with open(f'{realpath(__file__)[:-2]}txt') as inp_file:
            data = inp_file.read()
    data += '0' * (3 - len(data) % 3)  # Padding to make it divisible by 3
    # Replace anything that is not a lowercase hex digit with '0'.
    cleaned = ''.join(c if c in '0123456789abcdef' else '0' for c in data)
    third = len(cleaned) // 3
    # Take the first 2 chars of every 1/3rd part of the padded input.
    result = ''.join(cleaned[i:i + 2] for i in range(0, len(cleaned), third))
    print(result, end='')
    return result
if __name__ == '__main__':
    # Run only when executed as a script, not on import.
    main()
# This file is part of postcipes
# (c) Timofey Mukha
# The code is released under the MIT Licence.
# See LICENCE.txt and the Legal section in the README for more information
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .postcipe import Postcipe
import turbulucid as tbl
import numpy as np
import h5py
__all__ = ["BackwardFacingStep"]
class BackwardFacingStep(Postcipe):
    """Post-processing recipe for a backward-facing-step turbulucid case.

    Loads the case, extracts wall shear stress along the lower walls, and
    provides boundary-layer quantities (delta99, momentum thickness) with
    HDF5 save/load support.
    """

    def __init__(self, path, nu, uRef):
        """Load the case and precompute geometry and wall-data arrays.

        Args:
            path: Path of the turbulucid case to open.
            nu: Kinematic viscosity.
            uRef: Reference velocity (stored, used by callers).
        """
        Postcipe.__init__(self)
        self.case = tbl.Case(path)
        self.nu = nu
        self.uRef = uRef

        # Step height h and channel height H from the boundary edge lengths.
        self.h = np.sum(tbl.edge_lengths(self.case, "stepB"))
        self.H = np.sum(tbl.edge_lengths(self.case, "outletB")) - self.h
        # Expansion ratio of the step geometry.
        self.eRatio = (self.H + self.h)/self.H

        # Streamwise wall shear stress on the two lower-wall patches.
        self.tau1 = \
            self.case.boundary_data("lowB1")[1]["wallShearStressMean"][:, 0]
        self.tau2 = \
            self.case.boundary_data("lowB2")[1]["wallShearStressMean"][:, 0]
        self.tau = np.append(self.tau1, self.tau2)

        # Wall-face x coordinates for the same patches.
        self.x1 = self.case.boundary_data("lowB1")[0][:, 0]
        self.x2 = self.case.boundary_data("lowB2")[0][:, 0]
        self.x = np.append(self.x1, self.x2)

        # Index on lowB1 closest to x = -1.05*h (reference station upstream
        # of the step).
        self.idx105h = np.argmin(np.abs(self.x1 + 1.05*self.h))

        self.uTop = self.case.boundary_data("upB")[1]['UMean'][:, 0]

        # Computed lazily by compute_theta()/compute_delta99().
        self.theta = None
        self.delta99 = None
        self.edgeU = None

    def compute_delta99(self, u0='max', interpolate=True):
        """Compute delta99, Re_delta99 and Re_tau along the upstream wall.

        Args:
            u0: 'max' or 'last' — how the edge velocity is selected.
            interpolate: Forwarded to tbl.delta_99.
        """
        self.delta99 = np.zeros(self.x1.shape[0])
        self.edgeU = np.zeros(self.x1.shape[0])

        for i in range(self.x1.shape[0]):
            x = self.x1[i]
            y, v = tbl.profile_along_line(self.case, (x, -1), (x, 10),
                                          correctDistance=True)
            self.delta99[i] = tbl.delta_99(y, v['UMean'][:, 0], u0=u0,
                                           interpolate=interpolate)
            # BUG FIX: compare strings with ==, not `is` — identity
            # comparison with a literal only works by CPython interning.
            if u0 == 'max':
                self.edgeU[i] = np.max(v['UMean'][:, 0])
            elif u0 == 'last':
                self.edgeU[i] = v['UMean'][-1, 0]

        self.reDelta99 = self.delta99*self.edgeU/self.nu
        self.reTau = self.delta99*np.sqrt(np.abs(self.tau1))/self.nu
        self.delta99105h = self.delta99[self.idx105h]
        return 0

    def compute_theta(self, u0='max', interpolate=True):
        """Compute momentum thickness theta and Re_theta along the wall.

        Args:
            u0: 'max' or 'last' — how the edge velocity is selected.
            interpolate: Forwarded to tbl.momentum_thickness.
        """
        self.theta = np.zeros(self.x1.shape[0])
        self.edgeU = np.zeros(self.x1.shape[0])

        for i in range(self.x1.shape[0]):
            x = self.x1[i]
            y, v = tbl.profile_along_line(self.case, (x, -1), (x, 10),
                                          correctDistance=True)
            self.theta[i] = tbl.momentum_thickness(y, v['UMean'][:, 0], u0=u0,
                                                   interpolate=interpolate)
            # BUG FIX: use == instead of `is` for string comparison.
            if u0 == 'max':
                self.edgeU[i] = np.max(v['UMean'][:, 0])
            elif u0 == 'last':
                self.edgeU[i] = v['UMean'][-1, 0]

        self.reTheta = self.theta*self.edgeU/self.nu
        self.reTheta105h = self.reTheta[self.idx105h]
        return 0

    def save(self, name):
        """Write all computed quantities to an HDF5 file at `name`.

        Computes theta/delta99 first if they have not been computed yet.
        """
        f = h5py.File(name, 'w')

        f.attrs["h"] = self.h
        f.attrs["H"] = self.H
        f.attrs["nu"] = self.nu
        f.attrs["eRatio"] = self.eRatio
        f.attrs["uRef"] = self.uRef
        f.attrs["idx105h"] = self.idx105h

        f.create_dataset("x1", data=self.x1)
        f.create_dataset("x2", data=self.x2)
        f.create_dataset("x", data=self.x)
        f.create_dataset("uTop", data=self.uTop)
        f.create_dataset("tau1", data=self.tau1)
        f.create_dataset("tau2", data=self.tau2)
        f.create_dataset("tau", data=self.tau)

        if self.theta is None:
            self.compute_theta()
        if self.delta99 is None:
            self.compute_delta99()

        f.create_dataset("theta", data=self.theta)
        f.create_dataset("delta99", data=self.delta99)
        f.create_dataset("reTheta", data=self.reTheta)
        f.create_dataset("reTau", data=self.reTau)
        f.create_dataset("reDelta99", data=self.reDelta99)
        f.close()

    def load(self, name):
        """Restore all attributes from an HDF5 file written by save()."""
        f = h5py.File(name, 'r')

        self.h = f.attrs["h"]
        self.H = f.attrs["H"]
        self.nu = f.attrs["nu"]
        self.eRatio = f.attrs["eRatio"]
        self.uRef = f.attrs["uRef"]
        self.idx105h = f.attrs["idx105h"]

        self.x1 = f["x1"][:]
        self.x2 = f["x2"][:]
        self.x = f["x"][:]
        self.uTop = f["uTop"][:]
        self.tau1 = f["tau1"][:]
        self.tau2 = f["tau2"][:]
        self.tau = f["tau"][:]
        self.theta = f["theta"][:]
        self.delta99 = f["delta99"][:]
        self.reTheta = f["reTheta"][:]
        self.reTau = f["reTau"][:]
        self.reDelta99 = f["reDelta99"][:]
        f.close()
| postcipes/bfs.py | 4,955 | This file is part of postcipes (c) Timofey Mukha The code is released under the MIT Licence. See LICENCE.txt and the Legal section in the README for more information | 165 | en | 0.879404 |
from django.contrib import admin

from .models import Post, Reply

# Expose the forum models in the Django admin site.
for model in (Post, Reply):
    admin.site.register(model)
| showcase/post/admin.py | 149 | Register your models here. | 26 | en | 0.957485 |
# Listing_19-1.py
# Copyright Warren & Carter Sande, 2013
# Released under MIT license   http://www.opensource.org/licenses/mit-license.php
# Version $version    ----------------------------
# Demonstrates loading and playing a sound with Pygame's mixer module.
import pygame

pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode([640,480])

# Give the mixer a second to finish initializing before playing.
pygame.time.delay(1000)

splat = pygame.mixer.Sound("splat.wav")  # load the sound from disk
splat.play()

# Keep the window open until the user closes it.
running = True
while running:
    if any(event.type == pygame.QUIT for event in pygame.event.get()):
        running = False
pygame.quit()
| FatherSon/HelloWorld2_source_code/Listing_19-1.py | 692 | Listing_19-1.py Copyright Warren & Carter Sande, 2013 Released under MIT license http://www.opensource.org/licenses/mit-license.php Version $version ---------------------------- Trying out sounds in Pygame Wait a second for the mixer to finish initializing Create the Sound object Play the sound | 298 | en | 0.594575 |
"""
With these settings, tests run faster.
"""
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="4UWuZDCfH6ezPZd4iO85Yg3eZnzistCGFxpeeWPhqlIkDtdbY2rEPnZb9ZPTNZ79",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#test-runner
TEST_RUNNER = "django.test.runner.DiscoverRunner"
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
# TEMPLATES
# ------------------------------------------------------------------------------
TEMPLATES[-1]["OPTIONS"]["loaders"] = [ # type: ignore[index] # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
# Your stuff...
# ------------------------------------------------------------------------------
| config/settings/test.py | 1,422 | With these settings, tests run faster.
noqa GENERAL ------------------------------------------------------------------------------ https://docs.djangoproject.com/en/dev/ref/settings/secret-key https://docs.djangoproject.com/en/dev/ref/settings/test-runner PASSWORDS ------------------------------------------------------------------------------ https://docs.djangoproject.com/en/dev/ref/settings/password-hashers TEMPLATES ------------------------------------------------------------------------------ type: ignore[index] noqa F405 EMAIL ------------------------------------------------------------------------------ https://docs.djangoproject.com/en/dev/ref/settings/email-backend Your stuff... ------------------------------------------------------------------------------ | 777 | en | 0.371128 |
import copy
from functools import wraps
import numpy as np
import wandb
import torchvision
import torch
import torch.nn.functional as F
from kornia import enhance, filters
from torchvision.transforms import RandomApply, RandomChoice
from atariari.methods.utils import EarlyStopping
from torch import nn
from torch.utils.data import BatchSampler, RandomSampler
def default(val, def_val):
    """Return *val* unless it is None, in which case return *def_val*."""
    if val is None:
        return def_val
    return val
def flatten(t):
    """Collapse every dimension after the batch dimension into one."""
    batch = t.shape[0]
    return t.reshape(batch, -1)
def singleton(cache_key):
    """Decorator factory: memoize a method's result on attribute *cache_key*.

    The first call stores the return value on the instance under
    *cache_key*; subsequent calls return the cached object without
    invoking the wrapped function again.
    """
    def decorator(fn):
        @wraps(fn)
        def memoized(self, *args, **kwargs):
            cached = getattr(self, cache_key)
            if cached is None:
                cached = fn(self, *args, **kwargs)
                setattr(self, cache_key, cached)
            return cached
        return memoized
    return decorator
def get_module_device(module):
    """Device of the module's first parameter (assumes all share one device)."""
    first_param = next(module.parameters())
    return first_param.device
def set_requires_grad(model, val):
    """Enable or disable gradient tracking on every parameter of *model*."""
    for param in model.parameters():
        param.requires_grad = val
# loss fn
def loss_fn(x, y):
    """Per-sample BYOL loss: 2 - 2*cos(x, y) over the last dimension.

    Ranges from 0 (perfectly aligned) to 4 (opposite directions).
    """
    x_hat = F.normalize(x, p=2, dim=-1)
    y_hat = F.normalize(y, p=2, dim=-1)
    cos_sim = (x_hat * y_hat).sum(dim=-1)
    return 2 - 2 * cos_sim
# augmentation utils
# class RandomApply(nn.Module):
# def __init__(self, fn, p):
# super().__init__()
# self.fn = fn
# self.p = p
# def forward(self, x):
# if random.random() > self.p:
# return x
# return self.fn(x)
# exponential moving average
class EMA():
    """Exponential moving average with decay factor *beta*."""

    def __init__(self, beta):
        super().__init__()
        self.beta = beta

    def update_average(self, old, new):
        """Blend *new* into *old*; bootstrap with *new* when *old* is None."""
        if old is None:
            return new
        return self.beta * old + (1 - self.beta) * new
def update_moving_average(ema_updater, ma_model, current_model):
    """In-place EMA update of *ma_model*'s parameters toward *current_model*.

    Parameter order must match between the two models (as it does for a
    deepcopy); ema_updater provides update_average(old, new).
    """
    param_pairs = zip(current_model.parameters(), ma_model.parameters())
    for online_param, ema_param in param_pairs:
        new_weight = ema_updater.update_average(ema_param.data,
                                                online_param.data)
        ema_param.data = new_weight
# MLP class for projector and predictor
class MLP(nn.Module):
    """Two-layer perceptron: Linear -> BatchNorm1d -> ReLU -> Linear."""

    def __init__(self, dim, projection_size, hidden_size=4096):
        super().__init__()
        layers = [
            nn.Linear(dim, hidden_size),
            nn.BatchNorm1d(hidden_size),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_size, projection_size),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)
# a wrapper class for the base neural network
# will manage the interception of the hidden layer output
# and pipe it into the projecter and predictor nets
class NetWrapper(nn.Module):
    """Wrap a backbone net so a hidden layer's activation feeds a projector.

    A forward hook on the selected layer captures its (flattened) output;
    the projection MLP is created lazily because its input width is only
    known once an activation has been observed.
    """
    def __init__(self, net, projection_size, projection_hidden_size, layer=-2):
        super().__init__()
        self.net = net
        self.layer = layer  # final avg-pooling layer
        # Created lazily by _get_projector (cached via @singleton).
        self.projector = None
        self.projection_size = projection_size
        self.projection_hidden_size = projection_hidden_size
        # Most recent activation captured by the hook; cleared after use.
        self.hidden = None
        self.hook_registered = False
    def _find_layer(self):
        # The target layer may be addressed by name (str) or child index (int).
        if type(self.layer) == str:
            modules = dict([*self.net.named_modules()])
            return modules.get(self.layer, None)
        elif type(self.layer) == int:
            children = [*self.net.children()]
            return children[self.layer]
        return None
    def _hook(self, _, __, output):
        # Forward hook: stash the flattened activation for get_representation.
        self.hidden = flatten(output)
    def _register_hook(self):
        layer = self._find_layer()
        assert layer is not None, f'hidden layer ({self.layer}) not found'
        handle = layer.register_forward_hook(self._hook)
        self.hook_registered = True
    @singleton('projector')
    def _get_projector(self, hidden):
        # Size the projector from the first observed activation; the result
        # is cached on self.projector by the @singleton decorator.
        _, dim = hidden.shape
        projector = MLP(dim, self.projection_size, self.projection_hidden_size)
        return projector.to(hidden)
    def get_representation(self, x):
        """Return the hidden-layer activation for x (or net(x) if layer == -1)."""
        if self.layer == -1:
            return self.net(x)
        if not self.hook_registered:
            self._register_hook()
        _ = self.net(x)  # the forward pass fires the hook, filling self.hidden
        hidden = self.hidden
        self.hidden = None
        assert hidden is not None, f'hidden layer {self.layer} never emitted an output'
        return hidden
    def forward(self, x):
        representation = self.get_representation(x)
        projector = self._get_projector(representation)
        projection = projector(representation)
        return projection
# main class
class BYOL(nn.Module):
    """BYOL (Bootstrap Your Own Latent) self-supervised representation learner.

    An online encoder + predictor is trained to match the projections of a
    slowly moving (EMA) target encoder across two augmented views of each
    image; forward() returns the symmetric BYOL loss for a batch.
    """
    def __init__(self, net, image_size, grayscale=True, num_frame_stack=1, batch_size=64, hidden_layer=-2, projection_size=256, projection_hidden_size=4096, augment_fn=None, augment_fn2=None, moving_average_decay=0.99, wandb=None, patience=15):
        super().__init__()
        # default SimCLR augmentation
        #####
        # IMPORTANT for kornia: parameters are often float!! e.g. 1. vs 1
        # DEFAULT_AUG = nn.Sequential(
        #     RandomApply(augs.ColorJitter(0.8, 0.8, 0.8, 0.2), p=0.8),
        #     augs.RandomHorizontalFlip(),
        #     RandomApply(filters.GaussianBlur2d((3, 3), (1.5, 1.5)), p=0.1),
        # input tensor: float + normalized range [0,1]
        #     augs.RandomResizedCrop(
        #         size=(image_size, image_size), scale=(0.84, 1.), ratio=(1.,1.), p=1.0)
        #     augs.Normalize(mean=torch.tensor(
        #         [0.485, 0.456, 0.406]), std=torch.tensor([0.229, 0.224, 0.225]))
        # )
        kernel_size = (9, 9)  # has to be ODD
        # One random blur strength (drawn once at construction), shared by
        # both kernel dimensions.
        kernel_std = np.random.uniform(low=0.1, high=2.0)
        kernel_std = (kernel_std,)*2
        aug_transform = torchvision.transforms.Compose([
            RandomChoice(
                [enhance.AdjustBrightness(0.4),
                 enhance.AdjustBrightness(0.3),
                 enhance.AdjustBrightness(0.2),
                 enhance.AdjustBrightness(0.1),
                 enhance.AdjustBrightness(0.0)]
            ),
            RandomChoice(
                [enhance.AdjustContrast(1.0),
                 enhance.AdjustContrast(0.9),
                 enhance.AdjustContrast(0.8),
                 enhance.AdjustContrast(0.7),
                 enhance.AdjustContrast(0.6)]
            ),
            RandomApply([filters.GaussianBlur2d(
                kernel_size, kernel_std)], p=0.5)
            # RandomChoice(
            #     [enhance.AdjustContrast(1.0),
            #      enhance.AdjustContrast(1.0),
            #      enhance.AdjustContrast(1.0),
            #      filters.GaussianBlur2d((1, 1), (1, 1)),
            #      filters.GaussianBlur2d((3, 3), (1.5, 1.5))]
            # )
        ])
        self.augment1 = default(augment_fn, aug_transform)
        self.augment2 = default(augment_fn2, self.augment1)
        self.online_encoder = NetWrapper(
            net, projection_size, projection_hidden_size, layer=hidden_layer)
        # Target encoder is created lazily (see _get_target_encoder) as a
        # frozen EMA copy of the online encoder.
        self.target_encoder = None
        self.target_ema_updater = EMA(moving_average_decay)
        self.online_predictor = MLP(
            projection_size, projection_size, projection_hidden_size)
        self.batch_size = batch_size
        # get device of network and make wrapper same device
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        print(f"Device is {self.device.type}")
        self.to(self.device)
        self.wandb = wandb
        self.early_stopper = EarlyStopping(
            patience=patience, verbose=False, wandb=self.wandb, name="encoder-byol")
        if self.wandb:
            wandb.watch(self.online_encoder, self.target_encoder,
                        self.online_predictor)
        # send a mock image tensor to instantiate singleton parameters
        # NOTE(review): the 210x160 mock input looks like an Atari frame
        # size; the image_size argument itself is unused here -- confirm.
        assert grayscale
        nr_channels = num_frame_stack
        self.forward(torch.rand(batch_size, nr_channels,
                                210, 160, device=self.device))
        self.opt = torch.optim.Adam(self.parameters(), lr=3e-4)
        print(
            f"Finished Initialization of BYOL with model {self.online_encoder.net.__class__.__name__}")
    @singleton('target_encoder')
    def _get_target_encoder(self):
        # Frozen deep copy of the online encoder; its weights only move via
        # the EMA update, never by backprop.
        target_encoder = copy.deepcopy(self.online_encoder)
        set_requires_grad(target_encoder, False)
        return target_encoder
    def reset_moving_average(self):
        del self.target_encoder
        self.target_encoder = None
    def update_moving_average(self):
        assert self.target_encoder is not None, 'target encoder has not been created yet'
        update_moving_average(self.target_ema_updater,
                              self.target_encoder, self.online_encoder)
    def forward(self, x):
        """Return the (scalar) symmetric BYOL loss for a batch of images x."""
        image_one, image_two = self.augment1(x), self.augment2(x)
        online_proj_one = self.online_encoder(image_one)
        online_proj_two = self.online_encoder(image_two)
        online_pred_one = self.online_predictor(online_proj_one)
        online_pred_two = self.online_predictor(online_proj_two)
        with torch.no_grad():
            # Target projections are never backpropagated through.
            target_encoder = self._get_target_encoder()
            target_proj_one = target_encoder(image_one)
            target_proj_two = target_encoder(image_two)
        # Symmetric loss: each view's prediction matches the OTHER view's
        # target projection.
        loss_one = loss_fn(online_pred_one, target_proj_two.detach())
        loss_two = loss_fn(online_pred_two, target_proj_one.detach())
        loss = loss_one + loss_two
        return loss.mean()
    def logResults(self, epoch_idx, epoch_loss, prefix=""):
        print(f"{prefix} Epoch: {epoch_idx}, Loss: {epoch_loss}")
        if self.wandb:
            self.wandb.log({prefix + '_loss': epoch_loss},
                           step=epoch_idx, commit=False)
    def doOneEpoch(self, nr_epoch, episodes):
        """Run one epoch over *episodes* and log the result.

        Mode ("train"/"val") follows nn.Module's self.training flag.
        """
        mode = "train" if self.training else "val"
        data_generator = generate_batch(episodes, self.batch_size, self.device)
        for steps, batch in enumerate(data_generator):
            print(f"batch nr {steps} for mode {mode}")
            loss = self(batch)
            # NOTE(review): the optimizer also steps when mode == "val" --
            # confirm whether validation is really meant to update weights.
            self.opt.zero_grad()
            loss.backward()
            self.opt.step()
            self.update_moving_average()  # update moving average of target encoder
        # NOTE(review): `loss / steps` divides only the LAST batch's loss by
        # the final batch index, not an accumulated epoch mean -- confirm.
        self.logResults(nr_epoch, loss / steps, prefix=mode)
        if mode == "val":
            self.early_stopper(-loss / steps, self.online_encoder)
def generate_batch(episodes, batch_size, device):
    """Yield batches of randomly sampled frames, scaled to [0, 1].

    Each batch samples `batch_size` episode indices with replacement and
    takes one random time step from each chosen episode.
    """
    total_steps = sum(len(episode) for episode in episodes)
    print('Total Steps: {}'.format(total_steps))
    # Sample `total_steps` episode indices with replacement, then group
    # them into fixed-size batches (incomplete final batch is dropped).
    episode_sampler = RandomSampler(range(len(episodes)),
                                    replacement=True, num_samples=total_steps)
    batch_sampler = BatchSampler(episode_sampler, batch_size, drop_last=True)
    for indices in batch_sampler:
        frames = []
        for idx in indices:
            episode = episodes[idx]
            step = np.random.randint(0, len(episode))
            frames.append(episode[step])
        # Pixel values arrive in [0, 255]; rescale to [0, 1].
        yield torch.stack(frames).float().to(device) / 255.
# SPDX-License-Identifier: MIT
#!/usr/bin/env python3
import os
import sys
from ply import yacc
from ply.lex import TOKEN
from .slexer import SLexer
from ..lib import dbg
from .symbol import (
BinaryOperatorSymbol, ConstraintSymbol, FieldSymbol, ArraySymbol,
CallSymbol, IDSymbol, ConcreteIntSymbol, StringLiteralSymbol
)
# for LALR table reuse
# NOTE(review): PLY writes its generated parser tables next to the module;
# putting this package directory on sys.path presumably lets the cached
# tables be re-imported instead of regenerated -- confirm.
ROOT = os.path.dirname(__file__)
sys.path.append(ROOT)
class SParser(object):
    """LALR parser for APISan symbol expressions, built with PLY's yacc.

    CAUTION: the docstrings of the p_* methods below ARE the grammar --
    PLY reads them while constructing the parse tables, so their text is
    load-bearing and must not be reworded.
    """
    # Precedence rules for the arithmetic operators
    precedence = (
        ('left', 'LOR'),
        ('left', 'LAND'),
        ('left', 'OR'),
        ('left', 'XOR'),
        ('left', 'AND'),
        ('left', 'EQ', 'NE'),
        ('left', 'GT', 'GE', 'LT', 'LE'),
        ('left', 'RSHIFT', 'LSHIFT'),
        ('left', 'PLUS', 'MINUS'),
        ('left', 'TIMES', 'DIVIDE', 'MOD')
    )
    def __init__(self, **kwargs):
        # Build the lexer first: yacc takes self.tokens from it.
        self.slex = SLexer()
        self.slex.build()
        self.tokens = self.slex.tokens
        self.yacc = yacc.yacc(module=self)
    def p_expression_1(self, p):
        ''' expression : binary_expression '''
        p[0] = p[1]
    def p_binary_expression_1(self, p):
        ''' binary_expression : cast_expression '''
        p[0] = p[1]
    def p_binary_expression_2(self, p):
        ''' binary_expression : binary_expression TIMES binary_expression
                              | binary_expression DIVIDE binary_expression
                              | binary_expression MOD binary_expression
                              | binary_expression PLUS binary_expression
                              | binary_expression MINUS binary_expression
                              | binary_expression RSHIFT binary_expression
                              | binary_expression LSHIFT binary_expression
                              | binary_expression LT binary_expression
                              | binary_expression LE binary_expression
                              | binary_expression GE binary_expression
                              | binary_expression GT binary_expression
                              | binary_expression EQ binary_expression
                              | binary_expression NE binary_expression
                              | binary_expression AND binary_expression
                              | binary_expression OR binary_expression
                              | binary_expression XOR binary_expression
                              | binary_expression LAND binary_expression
                              | binary_expression LOR binary_expression
        '''
        # Every binary operator reduces to a single BinaryOperatorSymbol node.
        p[0] = BinaryOperatorSymbol(p[1], p[2], p[3])
    def p_binary_expression_3(self, p):
        # expr CONSTRAINT_OP constraints
        ''' expression : expression CONSTRAINT_OP LBRACE constraint_list RBRACE '''
        p[0] = ConstraintSymbol(p[1], p[4])
    def p_constraint(self, p):
        ''' constraint : LBRACKET concrete_integer_expression COMMA concrete_integer_expression RBRACKET '''
        # A constraint is an integer [low, high] range tuple.
        p[0] = (p[2], p[4])
    def p_constraint_list(self, p):
        ''' constraint_list : constraint_list COMMA constraint
                            | constraint '''
        if len(p) == 2:
            p[0] = [p[1]]
        else:
            # Extend the existing list in place on each reduction.
            p[0] = p[1]
            p[1].append(p[3])
    def p_cast_expression_1(self, p):
        """ cast_expression : unary_expression """
        p[0] = p[1]
    def p_unary_expression_1(self, p):
        """ unary_expression : postfix_expression """
        p[0] = p[1]
    def p_unary_expression_2(self, p):
        """ unary_expression : AND postfix_expression """
        # XXX : needs to handle & operator
        p[0] = p[2]
    def p_postfix_expression_1(self, p):
        ''' postfix_expression : primary_expression '''
        p[0] = p[1]
    def p_postfix_expression_2(self, p):
        ''' postfix_expression : postfix_expression ARROW ID'''
        p[0] = FieldSymbol(p[1], p[3])
    def p_postfix_expression3(self, p):
        ''' postfix_expression : postfix_expression LBRACKET expression RBRACKET '''
        p[0] = ArraySymbol(p[1], p[3])
    def p_postfix_expression4(self, p):
        ''' postfix_expression : postfix_expression LPAREN argument_list RPAREN '''
        p[0] = CallSymbol(p[1], p[3])
    def p_primary_expression_1(self, p):
        ''' primary_expression : ID '''
        p[0] = IDSymbol(p[1])
    def p_primary_expression_2(self, p):
        ''' primary_expression : concrete_integer_expression '''
        p[0] = ConcreteIntSymbol(p[1])
    def p_primary_expression_3(self, p):
        '''primary_expression : LPAREN expression RPAREN'''
        p[0] = p[2]
    def p_primary_expression_4(self, p):
        ''' primary_expression : STRING_LITERAL '''
        p[0] = StringLiteralSymbol(p[1])
    def p_concrete_integer(self, p):
        ''' concrete_integer_expression : INT_CONST_DEC
                                        | MINUS INT_CONST_DEC '''
        if len(p) == 3:
            # MINUS INT_CONST_DEC: negate the literal.
            p[0] = -int(p[2])
        else:
            p[0] = int(p[1])
    def p_argument_list(self, p):
        ''' argument_list :
                          | expression
                          | argument_list COMMA expression '''
        if len(p) == 1:
            # Empty argument list.
            p[0] = []
        elif len(p) == 2:
            p[0] = [p[1]]
        else:
            p[0] = p[1]
            p[1].append(p[3])
    def parse(self, text):
        """Parse *text* and return the root symbol (None on parse error)."""
        # Stored for the (currently disabled) diagnostics in p_error.
        self.last_text = text
        return self.yacc.parse(input = text,
                               lexer = self.slex)
    def p_error(self, p):
        # Parse errors are swallowed silently; yacc.parse then returns None.
        #dbg.debug('Illegal token %s' % repr(p))
        #dbg.debug('Text : %s' % self.last_text)
        return
if __name__ == '__main__':
    # Smoke test: parse a few representative symbol expressions.
    parser = SParser()
    tests = ["\"String Literal\\n\"",
             "malloc(256)@={ [0, 0] }",
             "malloc(256)@={ [0, 0], [2, 18446744073709551615] }"]
    for test in tests:
        # BUG FIX: the original called undefined parse_symbol(); use the
        # parser instance's parse() method instead.
        print(parser.parse(test))
| analyzer/apisan/parse/sparser.py | 5,986 | argument_list :
| expression
| argument_list COMMA expression
binary_expression : cast_expression
binary_expression : binary_expression TIMES binary_expression
| binary_expression DIVIDE binary_expression
| binary_expression MOD binary_expression
| binary_expression PLUS binary_expression
| binary_expression MINUS binary_expression
| binary_expression RSHIFT binary_expression
| binary_expression LSHIFT binary_expression
| binary_expression LT binary_expression
| binary_expression LE binary_expression
| binary_expression GE binary_expression
| binary_expression GT binary_expression
| binary_expression EQ binary_expression
| binary_expression NE binary_expression
| binary_expression AND binary_expression
| binary_expression OR binary_expression
| binary_expression XOR binary_expression
| binary_expression LAND binary_expression
| binary_expression LOR binary_expression
expression : expression CONSTRAINT_OP LBRACE constraint_list RBRACE
cast_expression : unary_expression
concrete_integer_expression : INT_CONST_DEC
| MINUS INT_CONST_DEC
constraint : LBRACKET concrete_integer_expression COMMA concrete_integer_expression RBRACKET
constraint_list : constraint_list COMMA constraint
| constraint
expression : binary_expression
postfix_expression : postfix_expression LBRACKET expression RBRACKET
postfix_expression : postfix_expression LPAREN argument_list RPAREN
postfix_expression : primary_expression
postfix_expression : postfix_expression ARROW ID
primary_expression : ID
primary_expression : concrete_integer_expression
primary_expression : LPAREN expression RPAREN
primary_expression : STRING_LITERAL
unary_expression : postfix_expression
unary_expression : AND postfix_expression
SPDX-License-Identifier: MIT!/usr/bin/env python3 for LALR table reuse Precedence rules for the arithmetic operators expr CONSTRAINT_OP constraints XXX : needs to handle & operatordbg.debug('Illegal token %s' % repr(p))dbg.debug('Text : %s' % self.last_text) | 1,986 | en | 0.528269 |
""" StreamSort Projects Extension -- Constants
Copyright (c) 2021 IdmFoundInHim, under MIT License
"""
SINGLE_MAX_MS = 15 * 60 * 1000
SINGLE_MAX_TRACKS = 4
| projects/constants.py | 158 | StreamSort Projects Extension -- Constants
Copyright (c) 2021 IdmFoundInHim, under MIT License | 96 | en | 0.329785 |
# -*- coding: utf-8 -*-
""" XIO plugin for the minicbf format of images (DECTRIS-PILATUS).
"""
__version__ = "0.2.1"
__author__ = "Pierre Legrand (pierre.legrand@synchrotron-soleil.fr)"
__date__ = "23-09-2012"
__copyright__ = "Copyright (c) 2009-2012 Pierre Legrand"
__license__ = "New BSD, http://www.opensource.org/licenses/bsd-license.php"
import time
HEADER_KEYS = ["Detector:", "Pixel_size", "Silicon", "Exposure_time",
"Exposure_period", "Tau", "Count_cutoff", "Threshold_setting",
"N_excluded_pixels","Excluded_pixels:", "Flat_field:", "Trim_directory:",
"Wavelength", "Energy_range", "Detector_distance", "Detector_Voffset",
"Beam_xy","Flux","Filter_transmission","Start_angle", "Angle_increment",
"Detector_2theta", "Polarization", "Alpha", "Kappa", "Phi", "Chi",
"Oscillation_axis", "N_oscillations"]
def date_time(timestr):
    "Split 'YYYY/Mon/DD HH:MM:SS.frac' into (struct_time, fractional seconds)."
    seconds_part, frac_part = timestr.split(".")
    parsed = time.strptime(seconds_part, "%Y/%b/%d %H:%M:%S")
    return parsed, float("0." + frac_part)
def date_seconds(timestr):
    "Return *timestr* converted to epoch seconds (including the fraction)."
    parsed, frac = date_time(timestr)
    return time.mktime(parsed) + frac
def get_edge_resolution(pixel_x, width, distance, wavelength):
    "Resolution (d-spacing) at the edge of the detector."
    from math import atan, sin
    dist_mm = DISTANCE(distance)
    if abs(dist_mm) > 0.0:
        # Half of the detector width, in mm.
        half_width = 0.5 * FLOAT2(pixel_x) * int(width)
        theta = 0.5 * atan(half_width / dist_mm)
        return FLOAT1(wavelength) / (2 * sin(theta))
    else:
        return 0.
# First whitespace-separated field as a float (e.g. '0.980 A' -> 0.98).
FLOAT1 = lambda x: float(x.split()[0])
# First field converted metres -> millimetres ('0.000172 m' -> 0.172).
FLOAT2 = lambda x: float(x.split()[0])*1e3

def _parse_beam_xy(text):
    """Return the (x, y) floats from a '(x, y)'-style Beam_xy value.

    BUG FIX: the original sliced up to find(")")-1, which dropped the
    character just before ')' and so truncated the last digit of the
    second coordinate (e.g. '(100, 125)' yielded 12 instead of 125).
    """
    inner = text[text.find("(") + 1:text.find(")")]
    x_str, y_str = inner.split(",")[:2]
    return float(x_str), float(y_str)

def DISTANCE(inp):
    """Detector distance in mm from a bare number (mm) or 'value m' string.

    BUG FIX: the original used a bare 'except' and silently fell through
    (returning None) when a second token other than 'm' was present.
    """
    args = inp.split()
    if len(args) > 1 and args[1] == "m":
        return float(args[0])*1e3
    return float(args[0])

# Beam centre in mm: pixel coordinate scaled by the pixel size (FLOAT2 -> mm).
BEAMX = lambda x, y: _parse_beam_xy(x)[0]*FLOAT2(y)
BEAMY = lambda x, y: _parse_beam_xy(x)[1]*FLOAT2(y)
class Interpreter:
    "Container for the minicbf header-translation dict and helpers."
    HTD = {
    # The adsc Header Translator Dictionary.
    # Maps generic XIO header names to (list of raw header keys, converter).
    # Potential problems:
    # - There are multiple SIZE1, SIZE2 instances.
    # = The orientation of SIZE1 and SIZE2 is unknown
    #     Not a problem as long as SIZE1 = SIZE2..
    'ExposureTime':(['Exposure_time'], FLOAT1),
    'BeamX':(['Beam_xy', 'Pixel_size'], BEAMX),
    'BeamY':(['Beam_xy', 'Pixel_size'], BEAMY),
    'Distance':(['Detector_distance'], DISTANCE),
    'Wavelength':(['Wavelength'], FLOAT1),
    'PixelX':(['Pixel_size'], FLOAT2),
    'PixelY':(['Pixel_size'], FLOAT2),
    'Width':(['Binary-Size-Fastest-Dimension'], int),
    'Height':(['Binary-Size-Second-Dimension'], int),
    #'Message':(['MESSAGE'], lambda x: x.split(';')),
    'PhiStart':(['Start_angle'], FLOAT1),
    'PhiEnd':(['Start_angle', 'Angle_increment'], \
        lambda x, y: FLOAT1(x)+FLOAT1(y)),
    'PhiWidth':(['Angle_increment'], FLOAT1),
    'EdgeResolution':(['Pixel_size','Binary-Size-Second-Dimension','Detector_distance','Wavelength'], \
        get_edge_resolution),
    # Added keys from Graeme's convention.
    'TwoTheta':(['Detector_2theta'], FLOAT1),   # No example yet...
    'SerialNumber':(['Detector:'], str),
    'HeaderSize':(['HEADER_SIZE'], int),
    'OscAxis':(['Oscillation_axis'], lambda x: x.split(",")[0].lower().strip()),
    'DateStr':(['DATE'], str),
    'DateSeconds':(['DATE'], date_seconds),
    }
    SpecialRules = {
    # No special rules for now
    }
    Identifiers = {
    # Based on Serial Number. Contains (Synchrotron,BLname,DetectorType)
    #413:('ESRF','ID14EH2','ADSC Q4'),
    #420:('ESRF','ID14EH4','ADSC Q4R'),
    }
    def __init__(self):
        self.raw_head_dict = None
    def getRawHeadDict(self, raw_head):
        "Interpret the ascii structure of the minicbf image header."
        # Offsets: i_1 = start of header-contents text, i_2 = start of the
        # data section, i_3 = start of the MIME 'X-Binary-*' keys, i_4 = end
        # of a 500-byte window assumed to contain all of them.
        i_1 = 28+raw_head.find("_array_data.header_contents")
        i_2 = raw_head.find("_array_data.data", i_1)
        i_3 = raw_head.find("--CIF-BINARY-FORMAT-SECTION--", i_2)+29
        i_4 = i_3+500
        # '# key value' lines of the header-contents section.
        lis = [line[2:].strip().split(" ", 1) \
                for line in raw_head[i_1:i_2].splitlines() \
                if line and line[0]=="#"]
        # 'X-Binary-...: value' lines; the leading 'X-' is stripped by [2:].
        lis2 = [line[2:].strip().split(": ", 1) \
                for line in raw_head[i_3:i_4].splitlines() \
                if line and line[0:2]=="X-"]
        # Filling the raw_header_dict with some default values,
        # in case they are missing in the header.
        self.raw_head_dict = {'Detector_2theta': "0.", 'MESSAGE': ''}
        for val in lis:
            if (val[0] in HEADER_KEYS):
                if len(val) == 2:
                    self.raw_head_dict[val[0]] = val[1]
                else:
                    # Key present without a value.
                    self.raw_head_dict[val[0]] = None
        self.raw_head_dict.update(dict([ val for val in lis2 \
                                         if "Binary-" in val[0]]))
        # Add some default values
        self.raw_head_dict.update({'HEADER_SIZE': i_3})
        # NOTE(review): lis[1] is presumably the date line of the header --
        # confirm this holds for all PILATUS firmware versions.
        self.raw_head_dict.update({'DATE': " ".join(lis[1])})
        #self.raw_head_dict.update({'MESSAGE': '', 'TWO_THETA': '0',
        #                 'Beam_xy':"(1330.30, 1314.90)",
        #                 'Detector_distance': "0.4 m",
        #                 'Wavelength':"0.980 A",
        #                 'Angle_increment':"0.2 deg",
        #                 'Start_angle': "0. deg",
        #                 'Detector_2theta': "0. deg"})
        return self.raw_head_dict
| yamtbx/dataproc/XIO/plugins/minicbf_interpreter.py | 5,584 | Dummy class, container for standard Dict and Function.
from str return seconds
from str return timestr + msec
Intepret the ascii structure of the minicbf image header.
Calculate EdgeResolution
XIO plugin for the minicbf format of images (DECTRIS-PILATUS).
-*- coding: utf-8 -*- The adsc Header Translator Dictionary. Potential problems: - There are multiple SIZE1, SIZE2 instances. = The orientation of SIZE1 and SIZE2 is unknown Not a problem as long as SIZE1 = SIZE2..'Message':(['MESSAGE'], lambda x: x.split(';')), Added keys from Graeme's convention. No example yet... No special rules for now Based on Serial Number. Contains (Synchrotron,BLname,DetectorType)413:('ESRF','ID14EH2','ADSC Q4'),420:('ESRF','ID14EH4','ADSC Q4R'), Filling the raw_header_dict with some default values, in case they are missing in the header. Add some default valuesself.raw_head_dict.update({'MESSAGE': '', 'TWO_THETA': '0', 'Beam_xy':"(1330.30, 1314.90)", 'Detector_distance': "0.4 m", 'Wavelength':"0.980 A", 'Angle_increment':"0.2 deg", 'Start_angle': "0. deg", 'Detector_2theta': "0. deg"}) | 1,241 | en | 0.595659 |
# -*- coding: utf-8 -
#
# This file is part of gaffer. See the NOTICE for more information.
import os
import sys
from setuptools import setup, find_packages, Extension
# Reject unsupported interpreters early.  Any Python 3 version compares
# greater than (2, 6), so this check only bites on old Python 2.
py_version = sys.version_info[:2]
if py_version < (2, 6):
    raise RuntimeError('On Python 2, Gaffer requires Python 2.6 or better')
# Trove classifiers advertised on PyPI.
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Environment :: Web Environment',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: POSIX',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.0',
    'Programming Language :: Python :: 3.1',
    'Programming Language :: Python :: 3.2',
    'Topic :: System :: Boot',
    'Topic :: System :: Monitoring',
    'Topic :: System :: Systems Administration',
    'Topic :: Software Development :: Libraries']
# read long description
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:
    long_description = f.read()
# Ship the licence and readme files alongside the installed package.
DATA_FILES = [
    ('gaffer', ["LICENSE", "MANIFEST.in", "NOTICE", "README.rst",
                "THANKS", "UNLICENSE"])
]
setup(name='gaffer',
      version="0.4.1",
      description = 'simple system process manager',
      long_description = long_description,
      classifiers = CLASSIFIERS,
      license = 'BSD',
      url = 'http://github.com/benoitc/gaffer',
      author = 'Benoit Chesneau',
      author_email = 'benoitc@e-engura.org',
      packages=find_packages(),
      # C extension built from gaffer/sync.c.
      ext_modules = [
          Extension("gaffer.sync", ["gaffer/sync.c"])
      ],
      install_requires = [
          'pyuv>=0.8.3',
          'six',
          'psutil',
          'tornado==2.4',
          'colorama',
          'setproctitle'
      ],
      data_files = DATA_FILES,
      # Console scripts: node daemon, its control tool, and the local
      # process manager.
      entry_points="""
      [console_scripts]
      gafferd=gaffer.node.gafferd:run
      gafferctl=gaffer.node.gafferctl:run
      gaffer=gaffer.pm.main:main
      """)
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This example performs several tasks on Google Compute Platform. It can be
# run directly or can be imported into an interactive python session. This
# can also serve as live integration tests.
#
# To run directly, use python 2.7 or greater:
# - $ python gce_demo.py --help # to see the help screen
# - $ python gce_demo.py # to run all demos / tests
#
# To run interactively:
# - Make sure you have valid values in secrets.py
# (For more information about setting up your credentials, see the
# libcloud/common/google.py docstring)
# - Run 'python' in this directory, then:
# import gce_demo
# gce = gce_demo.get_gce_driver()
# gce.list_nodes()
# etc.
# - Or, to run the full demo from the interactive python shell:
# import gce_demo
# gce_demo.CLEANUP = False # optional
# gce_demo.MAX_NODES = 4 # optional
# gce_demo.DATACENTER = 'us-central1-a' # optional
# gce_demo.main_compute() # 'compute' only demo
# gce_demo.main_load_balancer() # 'load_balancer' only demo
# gce_demo.main_dns() # 'dns only demo
# gce_demo.main() # all demos / tests
import os.path
import sys
import datetime
import time
try:
import argparse
except:
print('This script uses the python "argparse" module. Please use Python '
'2.7 or greater.')
raise
try:
import secrets
except ImportError:
print('"demos/secrets.py" not found.\n\n'
'Please copy secrets.py-dist to secrets.py and update the GCE* '
'values with appropriate authentication information.\n'
'Additional information about setting these values can be found '
'in the docstring for:\n'
'libcloud/common/google.py\n')
sys.exit(1)
# Add parent dir of this file's dir to sys.path (OS-agnostically)
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__),
os.path.pardir)))
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import ResourceNotFoundError
from libcloud.loadbalancer.types import Provider as Provider_lb
from libcloud.loadbalancer.providers import get_driver as get_driver_lb
from libcloud.dns.types import Provider as Provider_dns
from libcloud.dns.providers import get_driver as get_driver_dns
from libcloud.dns.base import Record, Zone
from libcloud.utils.py3 import PY3
if PY3:
import urllib.request as url_req
else:
import urllib2 as url_req
# Maximum number of 1-CPU nodes to allow to run simultaneously
MAX_NODES = 5
# String that all resource names created by the demo will start with
# WARNING: Any resource that has a matching name will be destroyed.
DEMO_BASE_NAME = 'lct'
# Datacenter to create resources in
DATACENTER = 'us-central1-f'
# Clean up resources at the end (can be set to false in order to
# inspect resources at the end of the run). Resources will be cleaned
# at the beginning regardless.
CLEANUP = True
# Credentials come from demos/secrets.py (see module header for setup);
# missing attributes fall back to empty values.
args = getattr(secrets, 'GCE_PARAMS', ())
kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
# Add datacenter to kwargs for Python 2.5 compatibility
# (copy first so the dict defined in secrets.py is not mutated).
kwargs = kwargs.copy()
kwargs['datacenter'] = DATACENTER
# ==== HELPER FUNCTIONS ====
def get_gce_driver():
    """Build and return a GCE compute driver from the shared credentials."""
    return get_driver(Provider.GCE)(*args, **kwargs)
def get_gcelb_driver(gce_driver=None):
    """Build and return a GCE load-balancer driver.

    The LB driver performs all of its API calls through a GCE compute
    driver: either pass one in via *gce_driver*, or omit it and the LB
    driver constructs its own from the shared credentials.
    """
    lb_cls = get_driver_lb(Provider_lb.GCE)
    if gce_driver:
        return lb_cls(gce_driver=gce_driver)
    return lb_cls(*args, **kwargs)
def get_dns_driver(gce_driver=None):
    """Build and return a Google DNS driver.

    Like the LB driver, the DNS driver performs its API calls through a
    GCE compute driver: pass one in via *gce_driver*, or omit it and the
    DNS driver constructs its own from the shared credentials.
    """
    dns_cls = get_driver_dns(Provider_dns.GOOGLE)
    if gce_driver:
        return dns_cls(gce_driver=gce_driver)
    return dns_cls(*args, **kwargs)
def display(title, resource_list=None):
    """
    Display a list of resources.
    :param title: String to be printed at the heading of the list.
    :type title: ``str``
    :param resource_list: List of resources to display
    :type resource_list: Any ``object`` with a C{name} attribute
    """
    # BUG FIX: the default used to be a mutable [] shared across calls;
    # use the None-sentinel idiom instead.
    if resource_list is None:
        resource_list = []
    print('=> %s' % title)
    # Entries whose name matches the demo prefix are highlighted with '=>'.
    for item in resource_list:
        if isinstance(item, Record):
            if item.name.startswith(DEMO_BASE_NAME):
                print('=>   name=%s, type=%s' % (item.name, item.type))
            else:
                print('     name=%s, type=%s' % (item.name, item.type))
        elif isinstance(item, Zone):
            if item.domain.startswith(DEMO_BASE_NAME):
                print('=>   name=%s, dnsname=%s' % (item.id, item.domain))
            else:
                print('     name=%s, dnsname=%s' % (item.id, item.domain))
        elif hasattr(item, 'name'):
            if item.name.startswith(DEMO_BASE_NAME):
                print('=>   %s' % item.name)
            else:
                print('     %s' % item.name)
        else:
            # Plain strings (e.g. location names).
            if item.startswith(DEMO_BASE_NAME):
                print('=>   %s' % item)
            else:
                print('     %s' % item)
def cleanup_only():
    """List every resource type in the project, then destroy any resource
    whose name starts with DEMO_BASE_NAME.  Runs no demo/test steps."""
    start_time = datetime.datetime.now()
    display('Clean-up start time: %s' % str(start_time))
    gce = get_gce_driver()
    # Get project info and print name
    project = gce.ex_get_project()
    display('Project: %s' % project.name)
    # == Get Lists of Everything and Display the lists (up to 10) ==
    # These can either just return values for the current datacenter (zone)
    # or for everything.
    all_nodes = gce.list_nodes(ex_zone='all')
    display('Nodes:', all_nodes)
    all_addresses = gce.ex_list_addresses(region='all')
    display('Addresses:', all_addresses)
    all_volumes = gce.list_volumes(ex_zone='all')
    display('Volumes:', all_volumes)
    # This can return everything, but there is a large amount of overlap,
    # so we'll just get the sizes from the current zone.
    sizes = gce.list_sizes()
    display('Sizes:', sizes)
    # These are global
    firewalls = gce.ex_list_firewalls()
    display('Firewalls:', firewalls)
    networks = gce.ex_list_networks()
    display('Networks:', networks)
    images = gce.list_images()
    display('Images:', images)
    locations = gce.list_locations()
    display('Locations:', locations)
    zones = gce.ex_list_zones()
    display('Zones:', zones)
    snapshots = gce.ex_list_snapshots()
    display('Snapshots:', snapshots)
    # == Clean up any old demo resources ==
    display('Cleaning up any "%s" resources' % DEMO_BASE_NAME)
    clean_up(gce, DEMO_BASE_NAME, all_nodes,
             all_addresses + all_volumes + firewalls + networks + snapshots)
    # Volumes are cleaned in a second pass: some only become deletable
    # after the nodes holding them are destroyed above.
    volumes = gce.list_volumes()
    clean_up(gce, DEMO_BASE_NAME, None, volumes)
    end_time = datetime.datetime.now()
    display('Total runtime: %s' % str(end_time - start_time))
def clean_up(gce, base_name, node_list=None, resource_list=None):
    """
    Destroy all resources that have a name beginning with 'base_name'.
    :param base_name: String with the first part of the name of resources
                      to destroy
    :type base_name: ``str``
    :keyword node_list: List of nodes to consider for deletion
    :type node_list: ``list`` of :class:`Node`
    :keyword resource_list: List of resources to consider for deletion
    :type resource_list: ``list`` of I{Resource Objects}
    """
    if node_list is None:
        node_list = []
    if resource_list is None:
        resource_list = []
    # Use ex_destroy_multiple_nodes to destroy nodes
    del_nodes = []
    for node in node_list:
        if node.name.startswith(base_name):
            del_nodes.append(node)
    result = gce.ex_destroy_multiple_nodes(del_nodes)
    for i, success in enumerate(result):
        if success:
            display('   Deleted %s' % del_nodes[i].name)
        else:
            display('   Failed to delete %s' % del_nodes[i].name)
    # Destroy everything else with just the destroy method
    for resrc in resource_list:
        if resrc.name.startswith(base_name):
            try:
                resrc.destroy()
            except ResourceNotFoundError:
                display('   Not found: %s (%s)' % (resrc.name,
                                                   resrc.__class__.__name__))
            except Exception:
                # BUG FIX: this was a bare 'except:' which also intercepted
                # KeyboardInterrupt/SystemExit; catch Exception instead.
                # The error is still re-raised after being reported.
                class_name = resrc.__class__.__name__
                display('   Failed to Delete %s (%s)' % (resrc.name,
                                                         class_name))
                raise
# ==== COMPUTE CODE STARTS HERE ====
def main_compute():
    """Run the compute demo: list all resources, create nodes, disks,
    snapshots, networks, firewalls and addresses, then (if CLEANUP)
    destroy everything whose name starts with DEMO_BASE_NAME."""
    start_time = datetime.datetime.now()
    display('Compute demo/test start time: %s' % str(start_time))
    gce = get_gce_driver()
    # Get project info and print name
    project = gce.ex_get_project()
    display('Project: %s' % project.name)
    # == Get Lists of Everything and Display the lists (up to 10) ==
    # These can either just return values for the current datacenter (zone)
    # or for everything.
    all_nodes = gce.list_nodes(ex_zone='all')
    display('Nodes:', all_nodes)
    all_addresses = gce.ex_list_addresses(region='all')
    display('Addresses:', all_addresses)
    all_volumes = gce.list_volumes(ex_zone='all')
    display('Volumes:', all_volumes)
    # This can return everything, but there is a large amount of overlap,
    # so we'll just get the sizes from the current zone.
    sizes = gce.list_sizes()
    display('Sizes:', sizes)
    # These are global
    firewalls = gce.ex_list_firewalls()
    display('Firewalls:', firewalls)
    networks = gce.ex_list_networks()
    display('Networks:', networks)
    images = gce.list_images()
    display('Images:', images)
    locations = gce.list_locations()
    display('Locations:', locations)
    zones = gce.ex_list_zones()
    display('Zones:', zones)
    snapshots = gce.ex_list_snapshots()
    display('Snapshots:', snapshots)
    # == Clean up any old demo resources ==
    display('Cleaning up any "%s" resources' % DEMO_BASE_NAME)
    clean_up(gce, DEMO_BASE_NAME, all_nodes,
             all_addresses + all_volumes + firewalls + networks + snapshots)
    # == Create Node with disk auto-created ==
    if MAX_NODES > 1:
        display('Creating a node with boot/local-ssd using GCE structure:')
        name = '%s-gstruct' % DEMO_BASE_NAME
        img_url = "projects/debian-cloud/global/images/"
        img_url += "backports-debian-7-wheezy-v20141205"
        disk_type_url = "projects/%s/zones/us-central1-f/" % project.name
        disk_type_url += "diskTypes/local-ssd"
        gce_disk_struct = [
            {
                "type": "PERSISTENT",
                "deviceName": '%s-gstruct' % DEMO_BASE_NAME,
                "initializeParams": {
                    "diskName": '%s-gstruct' % DEMO_BASE_NAME,
                    "sourceImage": img_url
                },
                "boot": True,
                "autoDelete": True
            },
            {
                "type": "SCRATCH",
                "deviceName": '%s-gstruct-lssd' % DEMO_BASE_NAME,
                "initializeParams": {
                    "diskType": disk_type_url
                },
                "autoDelete": True
            }
        ]
        node_gstruct = gce.create_node(name, 'n1-standard-1', None,
                                       'us-central1-f',
                                       ex_disks_gce_struct=gce_disk_struct)
        num_disks = len(node_gstruct.extra['disks'])
        display('   Node %s created with %d disks' % (node_gstruct.name,
                                                      num_disks))
    display('Creating Node with auto-created SSD:')
    name = '%s-np-node' % DEMO_BASE_NAME
    node_1 = gce.create_node(name, 'n1-standard-1', 'debian-7',
                             ex_tags=['libcloud'], ex_disk_type='pd-ssd',
                             ex_disk_auto_delete=False)
    display('   Node %s created' % name)
    # == Create, and attach a disk ==
    display('Creating a new disk:')
    disk_name = '%s-attach-disk' % DEMO_BASE_NAME
    volume = gce.create_volume(10, disk_name)
    if volume.attach(node_1):
        display('   Attached %s to %s' % (volume.name, node_1.name))
    display('   Disabled auto-delete for %s on %s' % (volume.name,
                                                      node_1.name))
    gce.ex_set_volume_auto_delete(volume, node_1, auto_delete=False)
    if CLEANUP:
        # == Detach the disk ==
        if gce.detach_volume(volume, ex_node=node_1):
            display('   Detached %s from %s' % (volume.name,
                                                node_1.name))
    # == Create Snapshot ==
    display('Creating a snapshot from existing disk:')
    # Create a disk to snapshot
    vol_name = '%s-snap-template' % DEMO_BASE_NAME
    image = gce.ex_get_image('debian-7')
    vol = gce.create_volume(None, vol_name, image=image)
    # BUG FIX: message said 'shapshot' and reported DEMO_BASE_NAME instead
    # of the name of the disk that was actually created.
    display('Created disk %s to snapshot:' % vol_name)
    # Snapshot volume
    snapshot = vol.snapshot('%s-snapshot' % DEMO_BASE_NAME)
    display('   Snapshot %s created' % snapshot.name)
    # == Create Node with existing disk ==
    display('Creating Node with existing disk:')
    name = '%s-persist-node' % DEMO_BASE_NAME
    # Use objects this time instead of names
    # Get latest Debian 7 image
    image = gce.ex_get_image('debian-7')
    # Get Machine Size
    size = gce.ex_get_size('n1-standard-1')
    # Create Disk from Snapshot created above
    volume_name = '%s-boot-disk' % DEMO_BASE_NAME
    volume = gce.create_volume(None, volume_name, snapshot=snapshot)
    display('   Created %s from snapshot' % volume.name)
    # Create Node with Disk
    node_2 = gce.create_node(name, size, image, ex_tags=['libcloud'],
                             ex_boot_disk=volume,
                             ex_disk_auto_delete=False)
    display('   Node %s created with attached disk %s' % (node_2.name,
                                                          volume.name))
    # == Update Tags for Node ==
    display('Updating Tags for %s:' % node_2.name)
    tags = node_2.extra['tags']
    tags.append('newtag')
    if gce.ex_set_node_tags(node_2, tags):
        display('   Tags updated for %s' % node_2.name)
    check_node = gce.ex_get_node(node_2.name)
    display('   New tags: %s' % check_node.extra['tags'])
    # == Setting Metadata for Node ==
    display('Setting Metadata for %s:' % node_2.name)
    if gce.ex_set_node_metadata(node_2, {'foo': 'bar', 'baz': 'foobarbaz'}):
        display('   Metadata updated for %s' % node_2.name)
    check_node = gce.ex_get_node(node_2.name)
    display('   New Metadata: %s' % check_node.extra['metadata'])
    # == Create Multiple nodes at once ==
    base_name = '%s-multiple-nodes' % DEMO_BASE_NAME
    number = MAX_NODES - 2
    if number > 0:
        display('Creating Multiple Nodes (%s):' % number)
        multi_nodes = gce.ex_create_multiple_nodes(base_name, size, image,
                                                   number,
                                                   ex_tags=['libcloud'],
                                                   ex_disk_auto_delete=True)
        for node in multi_nodes:
            display('   Node %s created' % node.name)
    # == Create a Network ==
    display('Creating Network:')
    name = '%s-network' % DEMO_BASE_NAME
    cidr = '10.10.0.0/16'
    network_1 = gce.ex_create_network(name, cidr)
    display('   Network %s created' % network_1.name)
    # == Create a Firewall ==
    display('Creating a Firewall:')
    name = '%s-firewall' % DEMO_BASE_NAME
    allowed = [{'IPProtocol': 'tcp',
                'ports': ['3141']}]
    firewall_1 = gce.ex_create_firewall(name, allowed, network=network_1,
                                        source_tags=['libcloud'])
    display('   Firewall %s created' % firewall_1.name)
    # == Create a Static Address ==
    display('Creating an Address:')
    name = '%s-address' % DEMO_BASE_NAME
    address_1 = gce.ex_create_address(name)
    display('   Address %s created with IP %s' % (address_1.name,
                                                  address_1.address))
    # == List Updated Resources in current zone/region ==
    display('Updated Resources in current zone/region')
    nodes = gce.list_nodes()
    display('Nodes:', nodes)
    addresses = gce.ex_list_addresses()
    display('Addresses:', addresses)
    firewalls = gce.ex_list_firewalls()
    display('Firewalls:', firewalls)
    networks = gce.ex_list_networks()
    display('Networks:', networks)
    snapshots = gce.ex_list_snapshots()
    display('Snapshots:', snapshots)
    if CLEANUP:
        display('Cleaning up %s resources created' % DEMO_BASE_NAME)
        clean_up(gce, DEMO_BASE_NAME, nodes,
                 addresses + firewalls + networks + snapshots)
        # Volumes are cleaned after the nodes that held them are gone.
        volumes = gce.list_volumes()
        clean_up(gce, DEMO_BASE_NAME, None, volumes)
    end_time = datetime.datetime.now()
    display('Total runtime: %s' % str(end_time - start_time))
# ==== LOAD BALANCER CODE STARTS HERE ====
def main_load_balancer():
    """Run the load-balancer demo: build three web nodes behind a GCE load
    balancer, exercise member attach/detach, poll the balancer's IP, then
    (if CLEANUP) destroy everything the demo created."""
    start_time = datetime.datetime.now()
    display('Load-balancer demo/test start time: %s' % str(start_time))
    gce = get_gce_driver()
    gcelb = get_gcelb_driver(gce)
    # Get project info and print name
    project = gce.ex_get_project()
    display('Project: %s' % project.name)
    # Existing Balancers
    balancers = gcelb.list_balancers()
    display('Load Balancers', balancers)
    # Protocols
    protocols = gcelb.list_protocols()
    display('Protocols', protocols)
    # Healthchecks
    healthchecks = gcelb.ex_list_healthchecks()
    display('Health Checks', healthchecks)
    # This demo is based on the GCE Load Balancing Quickstart described here:
    # https://developers.google.com/compute/docs/load-balancing/lb-quickstart
    # == Clean-up and existing demo resources ==
    all_nodes = gce.list_nodes(ex_zone='all')
    firewalls = gce.ex_list_firewalls()
    display('Cleaning up any "%s" resources' % DEMO_BASE_NAME)
    clean_up(gce, DEMO_BASE_NAME, all_nodes,
             balancers + healthchecks + firewalls)
    # == Create 3 nodes to balance between ==
    startup_script = ('apt-get -y update && '
                      'apt-get -y install apache2 && '
                      'hostname > /var/www/index.html')
    tag = '%s-www' % DEMO_BASE_NAME
    base_name = '%s-www' % DEMO_BASE_NAME
    image = gce.ex_get_image('debian-7')
    size = gce.ex_get_size('n1-standard-1')
    number = 3
    display('Creating %d nodes' % number)
    metadata = {'items': [{'key': 'startup-script',
                           'value': startup_script}]}
    lb_nodes = gce.ex_create_multiple_nodes(base_name, size, image,
                                            number, ex_tags=[tag],
                                            ex_metadata=metadata,
                                            ex_disk_auto_delete=True,
                                            ignore_errors=False)
    display('Created Nodes', lb_nodes)
    # == Create a Firewall for instances ==
    display('Creating a Firewall')
    name = '%s-firewall' % DEMO_BASE_NAME
    allowed = [{'IPProtocol': 'tcp',
                'ports': ['80']}]
    firewall = gce.ex_create_firewall(name, allowed, source_tags=[tag])
    display('   Firewall %s created' % firewall.name)
    # == Create a Health Check ==
    display('Creating a HealthCheck')
    name = '%s-healthcheck' % DEMO_BASE_NAME
    # These are all the default values, but listed here as an example.  To
    # create a healthcheck with the defaults, only name is required.
    hc = gcelb.ex_create_healthcheck(name, host=None, path='/', port='80',
                                     interval=5, timeout=5,
                                     unhealthy_threshold=2,
                                     healthy_threshold=2)
    display('Healthcheck %s created' % hc.name)
    # == Create Load Balancer ==
    display('Creating Load Balancer')
    name = '%s-lb' % DEMO_BASE_NAME
    port = 80
    protocol = 'tcp'
    algorithm = None
    members = lb_nodes[:2]  # Only attach the first two initially
    healthchecks = [hc]
    balancer = gcelb.create_balancer(name, port, protocol, algorithm, members,
                                     ex_healthchecks=healthchecks)
    display('   Load Balancer %s created' % balancer.name)
    # == Attach third Node ==
    display('Attaching additional node to Load Balancer')
    member = balancer.attach_compute_node(lb_nodes[2])
    display('   Attached %s to %s' % (member.id, balancer.name))
    # == Show Balancer Members ==
    members = balancer.list_members()
    display('Load Balancer Members')
    for member in members:
        display('   ID: %s IP: %s' % (member.id, member.ip))
    # == Remove a Member ==
    display('Removing a Member')
    detached = members[0]
    detach = balancer.detach_member(detached)
    if detach:
        display('   Member %s detached from %s' % (detached.id,
                                                   balancer.name))
    # == Show Updated Balancer Members ==
    members = balancer.list_members()
    display('Updated Load Balancer Members')
    for member in members:
        display('   ID: %s IP: %s' % (member.id, member.ip))
    # == Reattach Member ==
    display('Reattaching Member')
    member = balancer.attach_member(detached)
    display('   Member %s attached to %s' % (member.id, balancer.name))
    # == Test Load Balancer by connecting to it multiple times ==
    PAUSE = 60
    display('Sleeping for %d seconds for LB members to serve...' % PAUSE)
    time.sleep(PAUSE)
    rounds = 200
    url = 'http://%s/' % balancer.ip
    line_length = 75
    display('Connecting to %s %s times' % (url, rounds))
    for x in range(rounds):
        response = url_req.urlopen(url)
        if PY3:
            output = str(response.read(), encoding='utf-8').strip()
        else:
            output = response.read().strip()
        # Pad per-backend so the rotating responses are visually distinct.
        if 'www-001' in output:
            padded_output = output.center(line_length)
        elif 'www-002' in output:
            padded_output = output.rjust(line_length)
        else:
            padded_output = output.ljust(line_length)
        sys.stdout.write('\r%s' % padded_output)
        sys.stdout.flush()
        time.sleep(.25)
    # BUG FIX: this was the Python 2 statement `print ""`, which is a
    # SyntaxError on Python 3 -- a platform this script explicitly supports.
    print('')
    if CLEANUP:
        balancers = gcelb.list_balancers()
        healthchecks = gcelb.ex_list_healthchecks()
        nodes = gce.list_nodes(ex_zone='all')
        firewalls = gce.ex_list_firewalls()
        display('Cleaning up %s resources created' % DEMO_BASE_NAME)
        clean_up(gce, DEMO_BASE_NAME, nodes,
                 balancers + healthchecks + firewalls)
    end_time = datetime.datetime.now()
    display('Total runtime: %s' % str(end_time - start_time))
# ==== GOOGLE DNS CODE STARTS HERE ====
def main_dns():
    """Run the (partial) DNS demo: list managed zones and their records."""
    start_time = datetime.datetime.now()
    display('DNS demo/test start time: %s' % str(start_time))
    gce = get_gce_driver()
    gdns = get_dns_driver()
    # Get project info and print name
    project = gce.ex_get_project()
    display('Project: %s' % project.name)
    # Get list of managed zones
    zones = gdns.iterate_zones()
    display('Zones', zones)
    # Get list of records
    # NOTE(review): zones is re-fetched here -- presumably because
    # iterate_zones() returns a one-shot iterator that display() has
    # already consumed; confirm against the DNS driver implementation.
    zones = gdns.iterate_zones()
    for z in zones:
        records = gdns.iterate_records(z)
        display('Records for managed zone "%s"' % z.id, records)
    # TODO(erjohnso): Finish this DNS section. Challenging in that you need to
    # own a domain, so testing will require user customization. Perhaps a new
    # command-line required flag unless --skip-dns is supplied. Also, real
    # e2e testing should try to do DNS lookups on new records, but DNS TTL
    # and propagation delays will introduce limits on what can be tested.
    end_time = datetime.datetime.now()
    display('Total runtime: %s' % str(end_time - start_time))
if __name__ == '__main__':
    # Command-line entry point: each flag enables one demo section.
    parser = argparse.ArgumentParser(
        description='Google Cloud Platform Demo / Live Test Script')
    parser.add_argument("--compute",
                        help="perform compute demo / live tests",
                        dest="compute", action="store_true")
    parser.add_argument("--load-balancer",
                        help="perform load-balancer demo / live tests",
                        dest="lb", action="store_true")
    parser.add_argument("--dns",
                        help="perform DNS demo / live tests",
                        dest="dns", action="store_true")
    parser.add_argument("--cleanup-only",
                        help="perform clean-up (skips all tests)",
                        dest="cleanup", action="store_true")
    cl_args = parser.parse_args()
    # --cleanup-only overrides every other flag; otherwise the requested
    # demos run in compute -> load-balancer -> dns order.  With no flags,
    # nothing runs.
    if cl_args.cleanup:
        cleanup_only()
    else:
        if cl_args.compute:
            main_compute()
        if cl_args.lb:
            main_load_balancer()
        if cl_args.dns:
            main_dns()
| demos/gce_demo.py | 26,411 | !/usr/bin/env python Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. This example performs several tasks on Google Compute Platform. It can be run directly or can be imported into an interactive python session. This can also serve as live integration tests. To run directly, use python 2.7 or greater: - $ python gce_demo.py --help to see the help screen - $ python gce_demo.py to run all demos / tests To run interactively: - Make sure you have valid values in secrets.py (For more information about setting up your credentials, see the libcloud/common/google.py docstring) - Run 'python' in this directory, then: import gce_demo gce = gce_demo.get_gce_driver() gce.list_nodes() etc. 
- Or, to run the full demo from the interactive python shell: import gce_demo gce_demo.CLEANUP = False optional gce_demo.MAX_NODES = 4 optional gce_demo.DATACENTER = 'us-central1-a' optional gce_demo.main_compute() 'compute' only demo gce_demo.main_load_balancer() 'load_balancer' only demo gce_demo.main_dns() 'dns only demo gce_demo.main() all demos / tests Add parent dir of this file's dir to sys.path (OS-agnostically) Maximum number of 1-CPU nodes to allow to run simultaneously String that all resource names created by the demo will start with WARNING: Any resource that has a matching name will be destroyed. Datacenter to create resources in Clean up resources at the end (can be set to false in order to inspect resources at the end of the run). Resources will be cleaned at the beginning regardless. Add datacenter to kwargs for Python 2.5 compatibility ==== HELPER FUNCTIONS ==== The GCE Load Balancer driver uses the GCE Compute driver for all of its API calls. You can either provide the driver directly, or provide the same authentication information so the LB driver can get its own Compute driver. The Google DNS driver uses the GCE Compute driver for all of its API calls. You can either provide the driver directly, or provide the same authentication information so the LB driver can get its own Compute driver. Get project info and print name == Get Lists of Everything and Display the lists (up to 10) == These can either just return values for the current datacenter (zone) or for everything. This can return everything, but there is a large amount of overlap, so we'll just get the sizes from the current zone. 
These are global == Clean up any old demo resources == Use ex_destroy_multiple_nodes to destroy nodes Destroy everything else with just the destroy method ==== COMPUTE CODE STARTS HERE ==== Get project info and print name == Get Lists of Everything and Display the lists (up to 10) == These can either just return values for the current datacenter (zone) or for everything. This can return everything, but there is a large amount of overlap, so we'll just get the sizes from the current zone. These are global == Clean up any old demo resources == == Create Node with disk auto-created == == Create, and attach a disk == == Detach the disk == == Create Snapshot == Create a disk to snapshot Snapshot volume == Create Node with existing disk == Use objects this time instead of names Get latest Debian 7 image Get Machine Size Create Disk from Snapshot created above Create Node with Disk == Update Tags for Node == == Setting Metadata for Node == == Create Multiple nodes at once == == Create a Network == == Create a Firewall == == Create a Static Address == == List Updated Resources in current zone/region == ==== LOAD BALANCER CODE STARTS HERE ==== Get project info and print name Existing Balancers Protocols Healthchecks This demo is based on the GCE Load Balancing Quickstart described here: https://developers.google.com/compute/docs/load-balancing/lb-quickstart == Clean-up and existing demo resources == == Create 3 nodes to balance between == == Create a Firewall for instances == == Create a Health Check == These are all the default values, but listed here as an example. To create a healthcheck with the defaults, only name is required. 
== Create Load Balancer == Only attach the first two initially == Attach third Node == == Show Balancer Members == == Remove a Member == == Show Updated Balancer Members == == Reattach Member == == Test Load Balancer by connecting to it multiple times == ==== GOOGLE DNS CODE STARTS HERE ==== Get project info and print name Get list of managed zones Get list of records TODO(erjohnso): Finish this DNS section. Challenging in that you need to own a domain, so testing will require user customization. Perhaps a new command-line required flag unless --skip-dns is supplied. Also, real e2e testing should try to do DNS lookups on new records, but DNS TTL and propagation delays will introduce limits on what can be tested. | 5,630 | en | 0.840526 |
from django.shortcuts import render
from django.http import JsonResponse
from django.core.files.storage import FileSystemStorage
import requests
# Create your views here.
def cnn(request):
    """Render the CNN demo page."""
    template_name = 'CNN/cnn.html'
    return render(request, template_name)
def change(request):
    """Accept an uploaded image ('origin'), save it locally, forward it to
    the remote inference API and return the API's JSON response.

    :param request: Django HttpRequest; must be a POST with an 'origin' file.
    :returns: JsonResponse with the API result, or a 400 error response.
    """
    if request.method != 'POST' or 'origin' not in request.FILES:
        # BUG FIX: the original fell through and implicitly returned None
        # (a 500 in Django) for non-POST / missing-file requests.
        return JsonResponse({'error': "POST with an 'origin' file required"},
                            status=400)
    myfile = request.FILES['origin']
    fs = FileSystemStorage('./bssets/inputs/')  # defaults to MEDIA_ROOT
    # fs.save() de-duplicates the name, so use the returned filename.
    filename = fs.save(myfile.name, myfile)
    api_host = 'http://35.221.233.111:8000/'
    photo = './bssets/inputs/' + filename
    # BUG FIX: use a context manager so the file handle is not leaked.
    with open(photo, 'rb') as fh:
        files = {'file': (filename, fh, 'image/jpeg')}
        response = requests.post(api_host, files=files)
    # BUG FIX: a requests.Response object is not JSON-serializable; return
    # the decoded body instead of the Response itself.
    return JsonResponse(response.json(), safe=False)
| CNN/views.py | 1,043 | Create your views here.defaults to MEDIA_ROOT Here we know the file is in file name = file_name + randomNumber | 113 | en | 0.79925 |
"""
Compatibility tools for differences between Python 2 and 3
"""
import functools
import itertools
import sys
import urllib
# Interpreter feature flags used throughout the package.
PY3 = (sys.version_info[0] >= 3)
PY3_2 = sys.version_info[:2] == (3, 2)
if PY3:
    # Python 3 branch: re-export renamed stdlib modules and builtins under
    # their Python 2 names so the rest of the package imports from one place.
    import builtins
    from collections import namedtuple
    from io import StringIO, BytesIO
    import inspect
    cStringIO = StringIO
    import pickle as cPickle
    pickle = cPickle
    import urllib.request
    import urllib.parse
    from urllib.request import HTTPError, urlretrieve, URLError
    import io
    bytes = bytes
    str = str
    asunicode = lambda x, _ : str(x)
    def asbytes(s):
        # str -> bytes via latin1; bytes pass through unchanged.
        if isinstance(s, bytes):
            return s
        return s.encode('latin1')
    def asstr(s):
        # bytes -> str via latin1; str passes through unchanged.
        if isinstance(s, str):
            return s
        return s.decode('latin1')
    def asstr2(s):  # added JP, not in numpy version
        # Like asstr, but also stringifies arbitrary non-text objects.
        if isinstance(s, str):
            return s
        elif isinstance(s, bytes):
            return s.decode('latin1')
        else:
            return str(s)
    def isfileobj(f):
        return isinstance(f, io.FileIO)
    def open_latin1(filename, mode='r'):
        return open(filename, mode=mode, encoding='iso-8859-1')
    strchar = 'U'
    # have to explicitly put builtins into the namespace
    range = range
    map = map
    zip = zip
    filter = filter
    reduce = functools.reduce
    long = int
    unichr = chr
    zip_longest = itertools.zip_longest
    # list-producing versions of the major Python iterating functions
    def lrange(*args, **kwargs):
        return list(range(*args, **kwargs))
    def lzip(*args, **kwargs):
        return list(zip(*args, **kwargs))
    def lmap(*args, **kwargs):
        return list(map(*args, **kwargs))
    def lfilter(*args, **kwargs):
        return list(filter(*args, **kwargs))
    urlopen = urllib.request.urlopen
    urljoin = urllib.parse.urljoin
    urlretrieve = urllib.request.urlretrieve
    urlencode = urllib.parse.urlencode
    string_types = str
    input = input
    ArgSpec= namedtuple('ArgSpec', ['args', 'varargs', 'keywords', 'defaults'])
    def getargspec(func):
        """
        Simple workaround for getargspec deprecation that returns
        an ArgSpec-like object.

        Built on inspect.signature(); 'varargs'/'keywords' hold the names
        of the *args / **kwargs parameters, or None when absent.
        """
        sig = inspect.signature(func)
        parameters = sig.parameters
        args, defaults = [], []
        varargs, keywords = None, None
        for key in parameters:
            parameter = parameters[key]
            if parameter.kind == inspect.Parameter.VAR_POSITIONAL:
                varargs = key
            elif parameter.kind == inspect.Parameter.VAR_KEYWORD:
                keywords = key
            else:
                args.append(key)
            if parameter.default is not parameter.empty:
                defaults.append(parameter.default)
        # Mirror inspect.getargspec(): None rather than an empty list.
        defaults = None if len(defaults) == 0 else defaults
        return ArgSpec(args, varargs, keywords, defaults)
else:
    # Python 2 branch: the names already exist; alias them directly.
    import __builtin__ as builtins
    # not writeable when instantiated with string, doesn't handle unicode well
    from cStringIO import StringIO as cStringIO
    # always writeable
    from StringIO import StringIO
    from inspect import getargspec
    BytesIO = StringIO
    import cPickle
    pickle = cPickle
    import urllib2
    import urlparse
    bytes = str
    str = str
    asbytes = str
    asstr = str
    asstr2 = str
    strchar = 'S'
    def isfileobj(f):
        return isinstance(f, file)
    def asunicode(s, encoding='ascii'):
        if isinstance(s, unicode):
            return s
        return s.decode(encoding)
    def open_latin1(filename, mode='r'):
        return open(filename, mode=mode)
    # import iterator versions of these functions
    range = xrange
    zip = itertools.izip
    filter = itertools.ifilter
    map = itertools.imap
    reduce = reduce
    long = long
    unichr = unichr
    zip_longest = itertools.izip_longest
    # Python 2-builtin ranges produce lists
    lrange = builtins.range
    lzip = builtins.zip
    lmap = builtins.map
    lfilter = builtins.filter
    urlopen = urllib2.urlopen
    urljoin = urlparse.urljoin
    urlencode = urllib.urlencode
    HTTPError = urllib2.HTTPError
    URLError = urllib2.URLError
    string_types = basestring
    input = raw_input
def getexception():
    """Return the exception instance currently being handled."""
    _, value, _ = sys.exc_info()
    return value
def asbytes_nested(x):
    """Recursively apply asbytes to *x*, preserving nested list structure."""
    if isinstance(x, (bytes, str)) or not hasattr(x, '__iter__'):
        return asbytes(x)
    return [asbytes_nested(y) for y in x]
def asunicode_nested(x):
    """Recursively apply asunicode to *x*, preserving nested list structure."""
    if isinstance(x, (bytes, str)) or not hasattr(x, '__iter__'):
        return asunicode(x)
    return [asunicode_nested(y) for y in x]
# Provide a 'next' that works on Python 2.5 (it.next()) as well as on
# interpreters where 'next' is a builtin.
try:
    advance_iterator = next
except NameError:
    def advance_iterator(it):
        return it.next()
next = advance_iterator
# 'callable' is missing on some interpreters (removed in Python 3.0/3.1);
# emulate it by scanning the MRO for a __call__ slot.
try:
    callable = callable
except NameError:
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
def iteritems(obj, **kwargs):
    """Iterate over the (key, value) pairs of *obj* on Python 2 and 3.

    Prefers obj.iteritems(**kwargs) when available (Python 2 dicts) and
    falls back to obj.items(**kwargs) otherwise.  Replacement for six's
    iteritems.
    """
    method = getattr(obj, "iteritems", None) or obj.items
    return method(**kwargs)
def iterkeys(obj, **kwargs):
    """Iterate over the keys of *obj* on Python 2 and 3.

    Prefers obj.iterkeys(**kwargs) when available and falls back to
    obj.keys(**kwargs) otherwise.
    """
    method = getattr(obj, "iterkeys", None) or obj.keys
    return method(**kwargs)
def itervalues(obj, **kwargs):
    """Iterate over the values of *obj* on Python 2 and 3.

    Prefers obj.itervalues(**kwargs) when available and falls back to
    obj.values(**kwargs) otherwise.
    """
    method = getattr(obj, "itervalues", None) or obj.values
    return method(**kwargs)
def get_function_name(func):
    """Return the name of *func*, unwrapping Python 2 bound methods."""
    im_func = getattr(func, 'im_func', None)
    if im_func is not None:
        # Python 2 bound/unbound method: use the underlying function.
        return im_func.func_name
    # Python 3 (and plain functions/builtins).
    return func.__name__
def get_class(func):
    """Return the class that the bound method *func* is attached to."""
    im_class = getattr(func, 'im_class', None)
    if im_class is not None:
        # Python 2 bound method.
        return im_class
    # Python 3: recover the class from the bound instance.
    return func.__self__.__class__
# itertools.combinations is available from Python 2.6 on; keep a pure
# Python fallback (the documented-equivalent implementation) for older
# interpreters.
try:
    combinations = itertools.combinations
except:
    # Python 2.6 only
    def combinations(iterable, r):
        # combinations('ABCD', 2) --> AB AC AD BC BD CD
        # combinations(lrange(4), 3) --> 012 013 023 123
        pool = tuple(iterable)
        n = len(pool)
        if r > n:
            return
        indices = lrange(r)
        yield tuple(pool[i] for i in indices)
        while True:
            # Find the rightmost index that can still be advanced.
            for i in reversed(lrange(r)):
                if indices[i] != i + n - r:
                    break
            else:
                # Every index is at its maximum: enumeration is complete.
                return
            indices[i] += 1
            # Reset all indices to the right of i to their minimal values.
            for j in range(i+1, r):
                indices[j] = indices[j-1] + 1
            yield tuple(pool[i] for i in indices)
| statsmodels/compat/python.py | 6,588 | Simple workaround for getargspec deprecation that returns
an ArgSpec-like object
replacement for six's iteritems for Python2/3 compat
uses 'iteritems' if available and otherwise uses 'items'.
Passes kwargs to method.
Compatibility tools for differences between Python 2 and 3
added JP, not in numpy version have to explicitly put builtins into the namespace list-producing versions of the major Python iterating functions not writeable when instantiated with string, doesn't handle unicode well always writeable import iterator versions of these functions Python 2-builtin ranges produce listsPython 3Python 3 Python 2.6 only combinations('ABCD', 2) --> AB AC AD BC BD CD combinations(lrange(4), 3) --> 012 013 023 123 | 720 | en | 0.674038 |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and contributors
# License: MIT. See LICENSE
# import frappe
from frappe.model.document import Document
class WebPageBlock(Document):
    """Doctype controller for 'Web Page Block'.

    No custom behavior; everything is inherited from
    frappe.model.document.Document.
    """
    pass
| frappe/website/doctype/web_page_block/web_page_block.py | 209 | -*- coding: utf-8 -*- Copyright (c) 2020, Frappe Technologies and contributors License: MIT. See LICENSE import frappe | 118 | en | 0.567409 |
from builtins import range
from builtins import object
import numpy as np
from past.builtins import xrange
class KNearestNeighbor(object):
    """A kNN classifier using the L2 (Euclidean) distance."""

    def __init__(self):
        pass

    def train(self, X, y):
        """Memorize the training data (kNN has no real training step).

        Inputs:
        - X: A numpy array of shape (num_train, D) containing the training data
          consisting of num_train samples each of dimension D.
        - y: A numpy array of shape (num_train,) containing the training labels,
          where y[i] is the label for X[i].
        """
        self.X_train = X
        self.y_train = y

    def predict(self, X, k=1, num_loops=0):
        """Predict labels for test data using this classifier.

        Inputs:
        - X: A numpy array of shape (num_test, D) containing test data.
        - k: The number of nearest neighbors that vote for the predicted labels.
        - num_loops: Which distance implementation to use (0, 1 or 2 loops).

        Returns:
        - y: A numpy array of shape (num_test,) of predicted labels; y[i] is
          the predicted label for the test point X[i].

        Raises:
        - ValueError: if num_loops is not 0, 1 or 2.
        """
        if num_loops == 0:
            dists = self.compute_distances_no_loops(X)
        elif num_loops == 1:
            dists = self.compute_distances_one_loop(X)
        elif num_loops == 2:
            dists = self.compute_distances_two_loops(X)
        else:
            raise ValueError("Invalid value %d for num_loops" % num_loops)
        return self.predict_labels(dists, k=k)

    def compute_distances_two_loops(self, X):
        """Distance matrix via a nested loop over test and training points.

        Inputs:
        - X: A numpy array of shape (num_test, D) containing test data.

        Returns:
        - dists: A numpy array of shape (num_test, num_train) where dists[i, j]
          is the Euclidean distance between the ith test point and the jth
          training point.
        """
        num_test = X.shape[0]
        num_train = self.X_train.shape[0]
        dists = np.zeros((num_test, num_train))
        for i in range(num_test):
            for j in range(num_train):
                diff = X[i] - self.X_train[j]
                dists[i, j] = np.sqrt(np.dot(diff, diff))
        return dists

    def compute_distances_one_loop(self, X):
        """Distance matrix via a single loop over the test points.

        Input / Output: same as compute_distances_two_loops.
        """
        num_test = X.shape[0]
        num_train = self.X_train.shape[0]
        dists = np.zeros((num_test, num_train))
        for i in range(num_test):
            # Broadcast test point i against every training row at once.
            dists[i, :] = np.sqrt(np.sum((self.X_train - X[i]) ** 2, axis=1))
        return dists

    def compute_distances_no_loops(self, X):
        """Fully vectorized distance matrix with no explicit Python loops.

        Uses ||x - t||^2 = ||x||^2 + ||t||^2 - 2*x.t, so only one matrix
        product and two broadcast sums are needed. This avoids materializing
        the (num_test, num_train, D) intermediate that a naive broadcast
        subtraction would allocate.

        Input / Output: same as compute_distances_two_loops.
        """
        test_sq = np.sum(X ** 2, axis=1, keepdims=True)   # (num_test, 1)
        train_sq = np.sum(self.X_train ** 2, axis=1)      # (num_train,)
        cross = X.dot(self.X_train.T)                     # (num_test, num_train)
        # Clamp tiny negatives caused by floating-point cancellation.
        sq_dists = np.maximum(test_sq + train_sq - 2.0 * cross, 0.0)
        return np.sqrt(sq_dists)

    def predict_labels(self, dists, k=1):
        """Vote among the k nearest neighbors of each test point.

        Inputs:
        - dists: A numpy array of shape (num_test, num_train) where dists[i, j]
          gives the distance between the ith test point and the jth training
          point.

        Returns:
        - y: A numpy array of shape (num_test,) of predicted labels; ties are
          broken by choosing the smaller label.
        """
        num_test = dists.shape[0]
        y_pred = np.zeros(num_test)
        for i in range(num_test):
            # Labels of the k closest training points.
            closest_y = self.y_train[np.argsort(dists[i])[:k]]
            # np.unique returns labels sorted ascending and argmax returns the
            # first maximum, so ties resolve to the smallest label.
            labels, counts = np.unique(closest_y, return_counts=True)
            y_pred[i] = labels[np.argmax(counts)]
        return y_pred
| assignments/2021/assignment1/cs231n/classifiers/k_nearest_neighbor.py | 8,815 | a kNN classifier with L2 distance
Compute the distance between each test point in X and each training point
in self.X_train using no explicit loops.
Input / Output: Same as compute_distances_two_loops
Compute the distance between each test point in X and each training point
in self.X_train using a single loop over the test data.
Input / Output: Same as compute_distances_two_loops
Compute the distance between each test point in X and each training point
in self.X_train using a nested loop over both the training data and the
test data.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data.
Returns:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
is the Euclidean distance between the ith test point and the jth training
point.
Predict labels for test data using this classifier.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data consisting
of num_test samples each of dimension D.
- k: The number of nearest neighbors that vote for the predicted labels.
- num_loops: Determines which implementation to use to compute distances
between training points and testing points.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
Given a matrix of distances between test points and training points,
predict a label for each test point.
Inputs:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
gives the distance between the ith test point and the jth training point.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
Train the classifier. For k-nearest neighbors this is just
memorizing the training data.
Inputs:
- X: A numpy array of shape (num_train, D) containing the training data
consisting of num_train samples each of dimension D.
- y: A numpy array of shape (N,) containing the training labels, where
y[i] is the label for X[i].
TODO: Compute the l2 distance between the ith test point and the jth training point, and store the result in dists[i, j]. You should not use a loop over dimension, nor use np.linalg.norm(). *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** TODO: Compute the l2 distance between the ith test point and all training points, and store the result in dists[i, :]. Do not use np.linalg.norm(). *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** TODO: Compute the l2 distance between all test points and all training points without using any explicit loops, and store the result in dists. You should implement this function using only basic array operations; in particular you should not use functions from scipy, nor use np.linalg.norm(). HINT: Try to formulate the l2 distance using matrix multiplication and two broadcast sums. *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** A list of length k storing the labels of the k nearest neighbors to the ith test point. TODO: Use the distance matrix to find the k nearest neighbors of the ith testing point, and use self.y_train to find the labels of these neighbors. Store these labels in closest_y. Hint: Look up the function numpy.argsort. *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** TODO: Now that you have found the labels of the k nearest neighbors, you need to find the most common label in the list closest_y of labels. Store this label in y_pred[i]. Break ties by choosing the smaller label. *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)***** | 4,797 | en | 0.812165 |
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Wrapper for selecting the navigation environment that we want to train and
test on.
"""
import os
import glob
import logging
from research.cognitive_mapping_and_planning.render import swiftshader_renderer as renderer
from research.cognitive_mapping_and_planning.src import file_utils as fu
from research.cognitive_mapping_and_planning.src import utils as utils
def get_dataset(dataset_name):
    """Factory for navigation-dataset loaders; only 'sbpd' is supported."""
    if dataset_name == 'sbpd':
        return StanfordBuildingParserDataset(dataset_name)
    # Unknown name: log fatally and return nothing (logging.fatal does not raise).
    logging.fatal('Not one of sbpd')
    return None
class Loader():
    """Base class for dataset loaders.

    Resolves metadata files, per-building descriptors and building meshes
    below the dataset's data directory (provided by subclasses via
    get_data_dir).
    """

    def get_data_dir(self):
        """Return the dataset root directory; implemented by subclasses."""
        pass

    def get_meta_data(self, file_name, data_dir=None):
        """Load `<data_dir>/meta/<file_name>`.

        '.txt' files are returned as a list of right-stripped lines, '.pkl'
        files are loaded via utils.load_variables; any other extension
        returns None.
        """
        if data_dir is None:
            data_dir = self.get_data_dir()
        full_file_name = os.path.join(data_dir, 'meta', file_name)
        assert (fu.exists(full_file_name)), \
            '{:s} does not exist'.format(full_file_name)
        ext = os.path.splitext(full_file_name)[1]
        ls = None
        if ext == '.txt':
            with fu.fopen(full_file_name, 'r') as f:
                ls = [l.rstrip() for l in f]
        elif ext == '.pkl':
            ls = utils.load_variables(full_file_name)
        return ls

    def load_building(self, name, data_dir=None):
        """Return a descriptor dict with the paths relevant to one building."""
        if data_dir is None:
            data_dir = self.get_data_dir()
        out = {'name': name, 'data_dir': data_dir,
               'room_dimension_file': os.path.join(data_dir, 'room-dimension',
                                                   name + '.pkl'),
               'class_map_folder': os.path.join(data_dir, 'class-maps')}
        return out

    def load_building_meshes(self, building):
        """Load the building's first .obj mesh as a renderer.Shape, in a list."""
        dir_name = os.path.join(building['data_dir'], 'mesh', building['name'])
        mesh_file_name = glob.glob1(dir_name, '*.obj')[0]
        mesh_file_name_full = os.path.join(dir_name, mesh_file_name)
        # This is an informational progress message, not an error condition:
        # log it at INFO level (the original used logging.error).
        logging.info('Loading building from obj file: %s', mesh_file_name_full)
        shape = renderer.Shape(mesh_file_name_full, load_materials=True,
                               name_prefix=building['name'] + '_')
        return [shape]
class StanfordBuildingParserDataset(Loader):
    """Loader for the Stanford Building Parser dataset ('sbpd')."""

    def __init__(self, ver):
        self.ver = ver
        self.data_dir = None

    def get_data_dir(self):
        """Lazily resolve and cache the dataset root directory."""
        if self.data_dir is None:
            self.data_dir = 'data/stanford_building_parser_dataset/'
        return self.data_dir

    def get_benchmark_sets(self):
        """Names of the benchmark splits."""
        return self._get_benchmark_sets()

    def get_split(self, split_name):
        """Return the list of areas in `split_name` (only for the 'sbpd' version)."""
        if self.ver != 'sbpd':
            # logging.fatal does not raise; callers receive None.
            logging.fatal('Unknown version.')
            return None
        return self._get_split(split_name)

    @staticmethod
    def _get_benchmark_sets():
        return ['train1', 'val', 'test']

    @staticmethod
    def _get_split(split_name):
        splits = {
            'train': ['area1', 'area5a', 'area5b', 'area6'],
            'train1': ['area1'],
            'val': ['area3'],
            'test': ['area4'],
        }
        splits['all'] = sorted(set(splits['train'] + splits['val'] + splits['test']))
        return splits[split_name]
| research/cognitive_mapping_and_planning/datasets/factory.py | 3,880 | Wrapper for selecting the navigation environment that we want to train and
test on.
Copyright 2016 The TensorFlow Authors All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== | 745 | en | 0.843272 |
import pandas as pd
import numpy as np
def gloriosafuncao(df):
    """Clean and encode one raw socioeconomic questionnaire response.

    The raw answers arrive in form-submission order; they are re-labelled,
    reordered, turned into binary indicator / label-encoded / one-hot
    columns and returned as a single model-ready feature row.

    Parameters
    ----------
    df : sequence or mapping
        One raw response with exactly 22 fields in the form's order.

    Returns
    -------
    numpy.ndarray
        Shape (1, 32): 31 encoded features in the fixed ``ordem`` order,
        followed by the respondent's e-mail address.
    """
    df = pd.DataFrame([df])
    # The form delivers the questions out of order: tag each raw column with
    # the questionnaire slot (1..21) it belongs to, plus the e-mail field.
    numerico = [
        11, "email", 1, 2, 3, 7,
        8, 9, 12, 10, 13, 14,
        15, 16, 17, 18, 19, 20, 21, 4, 5, 6
    ]
    df.columns = numerico
    labels = [
        'email',
        'PPI',
        'ProgramasSociais',
        'ModalidadeEnsino',
        'QtdDependentes',
        'EscolaridadePai',
        'EscolaridadeMae',
        'RendaPerCapita',
        'AtividadeRemunerada',
        'SituacaoFinanceira',
        'QtdResponsaveisFinanceiros',
        'CondicaoTrabalho',
        'CondicaoRenda',
        'MoraCidadeCampus',
        'CondMoradia',
        'TipoTransporte',
        'NConducoes',
        'DoencaCronica',
        'Medicacao',
        'Deficiencia',
        'FDoencaCronica',
        'FMedicacao',
    ]
    # Reorder to: e-mail first, then questions 1..21 ascending, then attach
    # the human-readable column names.
    nomes_ordenados = [df.columns.to_list()[0]] + df.columns.to_list()[2:]
    nomes_ordenados.sort()
    nomes_ordenados = [df.columns.to_list()[1]] + nomes_ordenados
    df = df[nomes_ordenados]
    df.columns = labels
    # One yes/no indicator per work situation quoted in CondicaoTrabalho
    # (a single answer may mention several situations at once).
    condicoes = [
        'Desempregado',
        'Trabalhador Informal',
        'Trabalhador Autônomo',
        'Aposentado',
        'Empregado CLT',
        'Beneficiário INSS',
        'Funcionário Público'
    ]
    rotulos = [
        'Desempregado',
        'Informal',
        'Autonomo',
        'Aposentado',
        'CLT',
        'INSS',
        'FuncionarioPublico'
    ]
    for rotulo, cond in zip(rotulos, condicoes):
        df[rotulo] = df['CondicaoTrabalho'].map(
            lambda x: 'sim' if cond in x else 'nao')
    # Keep only the leading yes/no of MoraCidadeCampus and the transport
    # keyword (second word) of TipoTransporte.
    df['MoraCidadeCampus'] = df['MoraCidadeCampus'].apply(
        lambda x: x.split(',')[0].lower())
    df['TipoTransporte'] = df['TipoTransporte'].apply(
        lambda x: ''.join(x.split()[1]).capitalize())
    # 'sim' when at most two household members provide income.
    df['AteDois'] = df['QtdResponsaveisFinanceiros']\
        .apply(lambda x: 'sim' if ' '
               .join(x.split()[:-1]) == '1' or ' '
               .join(x.split()[:-1]) == '2' else 'nao')
    # Binary yes/no answers -> 1/0 integers.
    binario = [
        'PPI',
        'ProgramasSociais',
        'AtividadeRemunerada',
        'MoraCidadeCampus',
        'DoencaCronica',
        'Medicacao',
        'Deficiencia',
        'FDoencaCronica',
        'FMedicacao',
        'AteDois',
        'Desempregado',
        'Informal',
        'Autonomo',
        'Aposentado',
        'CLT',
        'INSS',
        'FuncionarioPublico'
    ]
    df_binario = pd.DataFrame()
    for elemento in binario:
        df_binario[elemento] = df[elemento].replace(
            ['sim', 'nao'], [1, 0]).astype(int)
    # Fixed ordinal encodings for the categorical answers.
    modalidade_map = {
        'Graduação': 1,
        'Médio Integrado EJA': 2,
        'Médio Técnico Integrado': 4,
        'Técnico Subsequente': 3,
    }
    transporte_map = {
        'Pé': 1,
        'Próprio': 1,
        'Público': 2,
        'Alternativo': 3
    }
    escolaridade_map = {
        'Desconheço': 4,
        'Não se aplica': 4,
        'Sem escolaridade': 4,
        'Ensino fundamental': 3,
        'Ensino médio': 2,
        'Ensino superior': 1,
    }
    moradia_map = {
        'Própria': 1,
        'Cedida': 2,
        'Financiada': 3,
        'Alugada': 4,
        'Outros': 4
    }
    # NOTE(review): the maps below pair the alphabetically sorted *observed*
    # categories with fixed score lists; when a category is absent from the
    # input the remaining scores shift position -- confirm this is intended.
    categorias = df['RendaPerCapita'].astype(
        'category').cat.categories.tolist()
    valores = [3, 2, 9, 8, 7, 6, 5, 4, 10, 1]
    renda_percapita_map = {k: v for k, v in zip(categorias, valores)}
    categorias = df['SituacaoFinanceira'].astype(
        'category').cat.categories.tolist()
    valores = [4, 2, 2, 1, 4, 5, 1]
    situacao_fin_map = {k: v for k, v in zip(categorias, valores)}
    categorias = df['QtdDependentes'].astype(
        'category').cat.categories.tolist()
    valores = [2, 3, 4, 5, 1]
    dependentes_map = {k: v for k, v in zip(categorias, valores)}
    categorias = df['NConducoes'].astype('category').cat.categories.tolist()
    valores = [2, 3, 1]
    conducoes_map = {k: v for k, v in zip(categorias, valores)}
    categorias = df['CondicaoRenda'].astype('category').cat.categories.tolist()
    valores = [1, 2, 3]
    cond_renda_map = {k: v for k, v in zip(categorias, valores)}
    labels = [
        'CondMoradia',
        'TipoTransporte',
        'RendaPerCapita',
        'SituacaoFinanceira',
        'NConducoes',
        'CondicaoRenda',
        "ModalidadeEnsino",
        "EscolaridadeMae",
        "EscolaridadePai",
        "QtdDependentes"
    ]
    label_encode = df[labels].copy()
    label_encode['CondMoradia'].replace(moradia_map, inplace=True)
    label_encode['TipoTransporte'].replace(transporte_map, inplace=True)
    label_encode['EscolaridadePai'].replace(escolaridade_map, inplace=True)
    label_encode['EscolaridadeMae'].replace(escolaridade_map, inplace=True)
    label_encode['SituacaoFinanceira'].replace(situacao_fin_map, inplace=True)
    label_encode['RendaPerCapita'].replace(renda_percapita_map, inplace=True)
    label_encode['QtdDependentes'].replace(dependentes_map, inplace=True)
    label_encode['NConducoes'].replace(conducoes_map, inplace=True)
    label_encode['CondicaoRenda'].replace(cond_renda_map, inplace=True)
    label_encode['ModalidadeEnsino'].replace(modalidade_map, inplace=True)
    # One-hot encode the number of financial providers.
    qtd = pd.DataFrame()
    qtd_res = ['ResFin_1', 'ResFin_2', 'ResFin_3', 'ResFin_4ouMais']
    opcs = [
        '1 membro',
        '2 membros',
        '3 membros',
        '4 ou mais membros'
    ]
    # BUG FIX: Series.replace is not in-place -- the result must be assigned
    # back, otherwise every ResFin_* column below stays 0.
    df['QtdResponsaveisFinanceiros'] = \
        df['QtdResponsaveisFinanceiros'].replace(opcs, qtd_res)
    for iqtd in qtd_res:
        qtd[iqtd] = df['QtdResponsaveisFinanceiros'].map(
            lambda x: int(1) if iqtd in x else int(0))
    dados_limpos = pd.concat([df_binario, label_encode, qtd], axis=1)
    # Final, fixed feature order expected by the model.
    ordem = ['PPI',
             'ProgramasSociais',
             'AtividadeRemunerada',
             'MoraCidadeCampus',
             'DoencaCronica',
             'Medicacao',
             'Deficiencia',
             'FDoencaCronica',
             'FMedicacao',
             'AteDois',
             'Desempregado',
             'Informal',
             'Autonomo',
             'Aposentado',
             'CLT',
             'INSS',
             'FuncionarioPublico',
             'ModalidadeEnsino',
             'CondMoradia',
             'TipoTransporte',
             'EscolaridadeMae',
             'EscolaridadePai',
             'RendaPerCapita',
             'SituacaoFinanceira',
             'QtdDependentes',
             'NConducoes',
             'CondicaoRenda',
             'ResFin_1',
             'ResFin_2',
             'ResFin_3',
             'ResFin_4ouMais']
    dados_limpos = dados_limpos[ordem]
    dados_limpos['email'] = df['email']
    return np.array(dados_limpos.loc[0]).reshape(1, -1)
| data-clean/clean.py | 6,947 | 'Beneficiario', 'Pescador/agricultor familiar', 'PescAgriF', 'Beneficiario', 'PescAgriF', | 89 | es | 0.248033 |
# -*- coding: utf-8 -*-
# Copyright 2020-2022 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020-2021
# - Radu Carpa <radu.carpa@cern.ch>, 2021
# - Mayank Sharma <imptodefeat@gmail.com>, 2021-2022
# - Simon Fayer <simon.fayer05@imperial.ac.uk>, 2021
# - Rakshita Varadarajan <rakshitajps@gmail.com>, 2021
# - Mario Lassnig <mario.lassnig@cern.ch>, 2021
# - Cedric Serfon <cedric.serfon@cern.ch>, 2021
# - Cedric Serfon <cedric.serfon@cern.ch>, 2021-2022
from __future__ import print_function
import traceback
import pytest
# local imports in the fixtures to make this file loadable in e.g. client tests
@pytest.fixture(scope='session')
def vo():
    """Session-scoped VO (virtual organization) identifier from the test server config."""
    from rucio.tests.common_server import get_vo
    return get_vo()
@pytest.fixture(scope='session')
def second_vo():
    """Return a second VO named 'new', creating it on first use.

    Skips the calling test when the server is not running in multi_vo mode.
    """
    from rucio.common.config import config_get_bool
    from rucio.core.vo import vo_exists, add_vo
    multi_vo = config_get_bool('common', 'multi_vo', raise_exception=False, default=False)
    if not multi_vo:
        pytest.skip('multi_vo mode is not enabled. Running multi_vo tests in single_vo mode would result in failures.')
    new_vo = 'new'
    if not vo_exists(vo=new_vo):
        add_vo(vo=new_vo, description='Test', email='rucio@email.com')
    return new_vo
@pytest.fixture(scope='session')
def long_vo():
    """Session-scoped long-form VO identifier used for HTTP headers."""
    from rucio.tests.common import get_long_vo
    return get_long_vo()
@pytest.fixture(scope='module')
def replica_client():
    """Module-scoped Rucio ReplicaClient instance."""
    from rucio.client.replicaclient import ReplicaClient
    return ReplicaClient()
@pytest.fixture(scope='module')
def rucio_client():
    """Module-scoped general-purpose Rucio Client instance."""
    from rucio.client import Client
    return Client()
@pytest.fixture(scope='module')
def did_client():
    """Module-scoped Rucio DIDClient instance."""
    from rucio.client.didclient import DIDClient
    return DIDClient()
@pytest.fixture
def rest_client():
    """Yield a Flask test client for the Rucio REST API.

    Every request and response is printed, which makes test failures easier
    to debug. The application's testing flag is restored on teardown.
    """
    from rucio.tests.common import print_response
    from flask.testing import FlaskClient
    from rucio.web.rest.flaskapi.v1.main import application

    class WrappedFlaskClient(FlaskClient):
        def __init__(self, *args, **kwargs):
            super(WrappedFlaskClient, self).__init__(*args, **kwargs)

        def open(self, path='/', *args, **kwargs):
            # Print the request line before delegating to Flask's client.
            print(kwargs.get('method', 'GET'), path)
            response = super(WrappedFlaskClient, self).open(path, *args, **kwargs)
            try:
                print_response(response)
            except Exception:
                # Printing is best-effort; never fail the request because of it.
                traceback.print_exc()
            return response

    _testing = application.testing
    application.testing = True
    application.test_client_class = WrappedFlaskClient
    with application.test_client() as client:
        yield client
    # NOTE(review): test_client_class is reset to None rather than to its
    # previous value -- fine as long as no other code customizes it.
    application.test_client_class = None
    application.testing = _testing
@pytest.fixture
def auth_token(rest_client, long_vo):
    """Log in as root/ddmlab via userpass and return the X-Rucio-Auth-Token string."""
    from rucio.tests.common import vohdr, headers, loginhdr
    auth_response = rest_client.get('/auth/userpass', headers=headers(loginhdr('root', 'ddmlab', 'secret'), vohdr(long_vo)))
    assert auth_response.status_code == 200
    token = auth_response.headers.get('X-Rucio-Auth-Token')
    assert token
    return str(token)
@pytest.fixture(scope='module')
def mock_scope(vo):
    """InternalScope 'mock' within the test VO."""
    from rucio.common.types import InternalScope
    return InternalScope('mock', vo=vo)
@pytest.fixture(scope='module')
def test_scope(vo):
    """InternalScope 'test' within the test VO."""
    from rucio.common.types import InternalScope
    return InternalScope('test', vo=vo)
@pytest.fixture(scope='module')
def root_account(vo):
    """InternalAccount 'root' within the test VO."""
    from rucio.common.types import InternalAccount
    return InternalAccount('root', vo=vo)
@pytest.fixture(scope='module')
def jdoe_account(vo):
    """InternalAccount 'jdoe' within the test VO."""
    from rucio.common.types import InternalAccount
    return InternalAccount('jdoe', vo=vo)
@pytest.fixture(scope="module")
def containerized_rses(rucio_client):
    """
    Detects if containerized rses for xrootd & ssh are available in the testing environment.
    :return: A list of (rse_name, rse_id) tuples.
    """
    from rucio.common.exception import InvalidRSEExpression

    def matching_rses(expression, token):
        # Resolve the expression and keep RSEs whose name contains `token`.
        names = [entry['rse'] for entry in rucio_client.list_rses(rse_expression=expression)]
        infos = [rucio_client.get_rse(name) for name in names]
        found = [(info['rse'], info['id']) for info in infos if token in info['rse'].lower()]
        found.sort()
        return found

    rses = []
    try:
        rses.extend(matching_rses('test_container_xrd=True', 'xrd'))
        rses.extend(matching_rses('test_container_ssh=True', 'ssh'))
    except InvalidRSEExpression as invalid_rse_expression:
        print("{ex}. Note that containerized RSEs will not be available in non-containerized test environments"
              .format(ex=invalid_rse_expression))
        traceback.print_exc()
    return rses
@pytest.fixture
def rse_factory(vo):
    """Yield a temporary-RSE factory scoped to the test VO; cleaned up on exit."""
    from rucio.tests.temp_factories import TemporaryRSEFactory
    with TemporaryRSEFactory(vo=vo) as factory:
        yield factory
@pytest.fixture(scope="class")
def rse_factory_unittest(request, vo):
    """
    unittest classes can get access to rse_factory fixture via this fixture
    """
    # The factory is exposed as a class attribute so legacy unittest-style
    # tests can reach it via self.rse_factory.
    from rucio.tests.temp_factories import TemporaryRSEFactory
    with TemporaryRSEFactory(vo=vo) as factory:
        request.cls.rse_factory = factory
        yield factory
        # NOTE(review): the explicit cleanup() looks redundant if
        # TemporaryRSEFactory.__exit__ already cleans up -- confirm against
        # rucio.tests.temp_factories.
        factory.cleanup()
@pytest.fixture
def did_factory(vo, mock_scope):
    """Yield a temporary-DID factory using the 'mock' scope; cleaned up on exit."""
    from rucio.tests.temp_factories import TemporaryDidFactory
    with TemporaryDidFactory(vo=vo, default_scope=mock_scope) as factory:
        yield factory
@pytest.fixture
def file_factory(tmp_path_factory):
    """Yield a temporary-file factory backed by pytest's tmp_path_factory."""
    from rucio.tests.temp_factories import TemporaryFileFactory
    with TemporaryFileFactory(pytest_path_factory=tmp_path_factory) as factory:
        yield factory
@pytest.fixture
def scope_factory():
    """Return a helper that creates one shared scope name across several VOs."""
    from rucio.common.utils import generate_uuid
    from rucio.core.scope import add_scope
    from rucio.common.types import InternalAccount, InternalScope

    def create_scopes(vos, account_name=None):
        # One random short suffix shared by the scope in every VO.
        suffix = str(generate_uuid()).lower()[:16]
        scope_name = 'shr_%s' % suffix
        owner = account_name if account_name else 'root'
        created_scopes = []
        for vo in vos:
            scope = InternalScope(scope_name, vo=vo)
            add_scope(scope, InternalAccount(owner, vo=vo))
            created_scopes.append(scope)
        return scope_name, created_scopes
    return create_scopes
@pytest.fixture
def db_session():
    """Yield a SQLAlchemy session; pending work is committed and the session closed on teardown."""
    from rucio.db.sqla import session
    db_session = session.get_session()
    yield db_session
    # NOTE(review): committing on teardown persists whatever the test left
    # pending -- confirm this is intended rather than a rollback.
    db_session.commit()
    db_session.close()
def __get_fixture_param(request):
    """Return the fixture parametrization attached to `request`.

    Falls back to scanning the test instance's `pytestmark` list for the
    first `parametrize` mark, because fixture parametrize support is
    incomplete for legacy unittest-style test cases.
    """
    fixture_param = getattr(request, "param", None)
    if not fixture_param:
        # Legacy unittest path: take the first parameter set of the first
        # @pytest.mark.parametrize(...) mark on the test class.
        for mark in request.instance.pytestmark:
            if mark.name == 'parametrize':
                fixture_param = mark.args[1][0]
                break
    return fixture_param
@pytest.fixture
def core_config_mock(request):
    """
    Fixture to allow having per-test core.config tables without affecting the other parallel tests.
    This override works only in tests which use core function calls directly, not in the ones working
    via the API, because the normal config table is not touched and the rucio instance answering API
    calls is not aware of this mock.
    This fixture acts by creating a new copy of the "config" sql table using the :memory: sqlite engine.
    Accesses to the "models.Config" table are then redirected to this temporary table via mock.patch().
    """
    from unittest import mock
    from rucio.common.utils import generate_uuid
    from sqlalchemy.pool import StaticPool
    from rucio.db.sqla.models import ModelBase, BASE, Column, String, PrimaryKeyConstraint
    from rucio.db.sqla.session import get_session, get_maker, get_engine, create_engine, declarative_base
    # Get the fixture parameters
    table_content = []
    params = __get_fixture_param(request)
    if params:
        table_content = params.get("table_content", table_content)
    # Create an in-memory dropdown replacement table for the "models.Config" table
    engine = create_engine('sqlite://', connect_args={'check_same_thread': False}, poolclass=StaticPool)
    InMemoryBase = declarative_base(bind=engine)

    # Table name is randomized so parallel tests never collide on the same
    # in-memory table.
    class InMemoryConfig(InMemoryBase, ModelBase):
        __tablename__ = 'configs_' + generate_uuid()
        section = Column(String(128))
        opt = Column(String(128))
        value = Column(String(4000))
        # NOTE(review): '_table_args' looks like a typo for SQLAlchemy's
        # '__table_args__'; as written the PK constraint is not applied -- confirm.
        _table_args = (PrimaryKeyConstraint('section', 'opt', name='CONFIGS_PK'), )

    InMemoryBase.metadata.create_all()
    # Register the new table with the associated engine into the sqlalchemy sessionmaker
    # In theory, this code must be protected by rucio.db.scla.session._LOCK, but this code will be executed
    # during test case initialization, so there is no risk here to have concurrent calls from within the
    # same process
    current_engine = get_engine()
    get_maker().configure(binds={BASE: current_engine, InMemoryBase: engine})
    # Fill the table with the requested mock data
    # get_session() returns a session factory; call it again for a session.
    session = get_session()()
    for section, option, value in (table_content or []):
        InMemoryConfig(section=section, opt=option, value=value).save(flush=True, session=session)
    session.commit()
    with mock.patch('rucio.core.config.models.Config', new=InMemoryConfig):
        yield
@pytest.fixture
def file_config_mock(request):
    """
    Fixture which allows to have an isolated in-memory configuration file instance which
    is not persisted after exiting the fixture.
    This override works only in tests which use config calls directly, not in the ones working
    via the API, as the server config is not changed.
    """
    from unittest import mock
    from rucio.common.config import Config, config_set, config_has_section, config_add_section
    # Get the fixture parameters
    overrides = []
    params = __get_fixture_param(request)
    if params:
        overrides = params.get("overrides", overrides)
    # Fresh parser instance; get_config is patched to return it so that the
    # overrides below only ever touch this in-memory copy.
    parser = Config().parser
    with mock.patch('rucio.common.config.get_config', side_effect=lambda: parser):
        for section, option, value in (overrides or []):
            if not config_has_section(section):
                config_add_section(section)
            config_set(section, option, value)
        yield
@pytest.fixture
def caches_mock(request):
    """Replace internal cache regions with per-test in-memory ones.

    For every module path listed in the fixture parameter 'caches_to_mock',
    the named object is mock.patch-ed with a fresh dogpile in-memory region
    for the duration of the test. Only affects tests that call core
    functions directly, not those going through the API.
    """
    from unittest import mock
    from contextlib import ExitStack
    from dogpile.cache import make_region

    params = __get_fixture_param(request)
    targets = params.get("caches_to_mock", []) if params else []
    with ExitStack() as patch_stack:
        regions = []
        for target in targets:
            in_memory_region = make_region().configure('dogpile.cache.memory', expiration_time=600)
            patch_stack.enter_context(mock.patch(target, new=in_memory_region))
            regions.append(in_memory_region)
        yield regions
@pytest.fixture
def metrics_mock():
    """
    Overrides the prometheus metric registry and allows to verify if the desired
    prometheus metrics were correctly recorded.
    """
    from unittest import mock
    from prometheus_client import CollectorRegistry
    # Fresh registry per test; COUNTERS is replaced with an empty dict so the
    # test starts without previously registered counter objects.
    with mock.patch('rucio.core.monitor.REGISTRY', new=CollectorRegistry()) as registry, mock.patch('rucio.core.monitor.COUNTERS', new={}):
        yield registry
| lib/rucio/tests/conftest.py | 12,679 | Fixture which overrides the different internal caches with in-memory ones for the duration
of a particular test.
This override works only in tests which use core function calls directly, not in the ones
working via API.
The fixture acts by mock.patching the REGION object in the provided list of modules to mock.
Detects if containerized rses for xrootd & ssh are available in the testing environment.
:return: A list of (rse_name, rse_id) tuples.
Fixture to allow having per-test core.config tables without affecting the other parallel tests.
This override works only in tests which use core function calls directly, not in the ones working
via the API, because the normal config table is not touched and the rucio instance answering API
calls is not aware of this mock.
This fixture acts by creating a new copy of the "config" sql table using the :memory: sqlite engine.
Accesses to the "models.Config" table are then redirected to this temporary table via mock.patch().
Fixture which allows to have an isolated in-memory configuration file instance which
is not persisted after exiting the fixture.
This override works only in tests which use config calls directly, not in the ones working
via the API, as the server config is not changed.
Overrides the prometheus metric registry and allows to verify if the desired
prometheus metrics were correctly recorded.
unittest classes can get access to rse_factory fixture via this fixture
-*- coding: utf-8 -*- Copyright 2020-2022 CERN Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Authors: - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020-2021 - Radu Carpa <radu.carpa@cern.ch>, 2021 - Mayank Sharma <imptodefeat@gmail.com>, 2021-2022 - Simon Fayer <simon.fayer05@imperial.ac.uk>, 2021 - Rakshita Varadarajan <rakshitajps@gmail.com>, 2021 - Mario Lassnig <mario.lassnig@cern.ch>, 2021 - Cedric Serfon <cedric.serfon@cern.ch>, 2021 - Cedric Serfon <cedric.serfon@cern.ch>, 2021-2022 local imports in the fixtures to make this file loadable in e.g. client tests Parametrize support is incomplete for legacy unittest test cases Manually retrieve the parameters from the list of marks: Get the fixture parameters Create an in-memory dropdown replacement table for the "models.Config" table Register the new table with the associated engine into the sqlalchemy sessionmaker In theory, this code must be protected by rucio.db.scla.session._LOCK, but this code will be executed during test case initialization, so there is no risk here to have concurrent calls from within the same process Fill the table with the requested mock data Get the fixture parameters | 3,086 | en | 0.787211 |
# -*- coding: utf-8 -*-
"""
Copyright © 2019-present Lenovo
This file is licensed under both the BSD-3 license for individual/non-commercial use and
EPL-1.0 license for commercial use. Full text of both licenses can be found in
COPYING.BSD and COPYING.EPL files.
"""
import time
from copy import deepcopy
from fnmatch import fnmatch
# Consts
# Attribute-name constants mirroring the keys that the real ``libuser``
# module exposes on user, group and shadow entries.
ADMIN = 'ADMIN'
ADMINISTRATORNAME = 'ADMINISTRATORNAME'
COMMONNAME = 'COMMONNAME'
EMAIL = 'EMAIL'
GECOS = 'GECOS'
GIDNUMBER = 'GIDNUMBER'
GIVENNAME = 'GIVENNAME'
GROUP = 'GROUP'
GROUPNAME = 'GROUPNAME'
GROUPPASSWORD = 'GROUPPASSWORD'
HOMEDIRECTORY = 'HOMEDIRECTORY'
HOMEPHONE = 'HOMEPHONE'
LOGINSHELL = 'LOGINSHELL'
MEMBERNAME = 'MEMBERNAME'
PROMPT = 'PROMPT'
ROOMNUMBER = 'ROOMNUMBER'
# Shadow-password bookkeeping attributes.
SHADOWEXPIRE = 'SHADOWEXPIRE'
SHADOWFLAG = 'SHADOWFLAG'
SHADOWINACTIVE = 'SHADOWINACTIVE'
SHADOWLASTCHANGE = 'SHADOWLASTCHANGE'
SHADOWMAX = 'SHADOWMAX'
SHADOWMIN = 'SHADOWMIN'
SHADOWNAME = 'SHADOWNAME'
SHADOWPASSWORD = 'SHADOWPASSWORD'
SHADOWWARNING = 'SHADOWWARNING'
SN = 'SN'
TELEPHONENUMBER = 'TELEPHONENUMBER'
UIDNUMBER = 'UIDNUMBER'
USER = 'USER'
USERNAME = 'USERNAME'
USERPASSWORD = 'USERPASSWORD'
UT_NAMESIZE = 'UT_NAMESIZE'
VALUE_INVALID_ID = 'VALUE_INVALID_ID'
# a const
class DEFAULT_NO_KEY(object):
    """Sentinel class: the class itself (or an instance of it) marks an
    attribute that must be removed from an entry when it is saved."""

    def __init__(self, *ignored_args, **ignored_kwargs):
        pass
class MockEnt(object):
    """Base class for in-memory mock LDAP entries.

    Instances are interned per primary key: ``__new__`` returns the already
    registered object from ``cls.objects`` when one exists.  ``__copy__`` and
    ``__deepcopy__`` are deliberately identity-preserving, so "copies" handed
    out by lookups share state with the registry.
    """

    # Placeholder; subclasses override with the attribute-name constant that
    # acts as the entry's primary key (e.g. USERNAME, GROUPNAME).
    pk = 'AbstructAttrunite'

    @classmethod
    def init_subclass(cls):
        # Give every direct subclass its own registry of saved entries.
        for sub in cls.__subclasses__():
            sub.objects = dict()

    def __new__(cls, pk):
        obj = cls.objects.get(pk, None)
        if obj is None:
            obj = super(MockEnt, cls).__new__(cls)
            obj.pk = pk
        return obj

    def __init__(self, pk):
        # NOTE: runs even when __new__ returned an interned instance, so the
        # entry's data is reset on every construction.
        self.data = {self.__class__.pk: pk}

    def __getattr__(self, key):
        # Delegate unknown attribute access to the underlying dict, so e.g.
        # ``ent.get(...)`` works like ``ent.data.get(...)``.
        return getattr(self.data, key)

    def __setitem__(self, key, val):
        self.data[key] = val

    def __getitem__(self, key):
        return self.data[key]

    def __repr__(self):
        return '<{}:{}: {}>'.format(
            self.__class__.__name__,
            self.pk,
            self.data
        )

    def __copy__(self):
        return self

    def __deepcopy__(self, memo):
        # Identity-preserving on purpose (the original mis-named this
        # parameter ``pk``; copy.deepcopy passes the memo dict here).
        return self

    def __iter__(self):
        return iter(self.data)

    def save(self, overwrite=True):
        """Fill in defaults, run presave hooks, normalize values to lists and
        register the entry in the class-level ``objects`` registry.

        Raises RuntimeError when ``overwrite`` is False and an entry with the
        same primary key already exists.
        """
        default = deepcopy(self.default)
        # if no pk
        default[self.__class__.pk] = [self.pk]
        # set default
        for k, v in default.items():
            if k not in self.data:
                self.data[k] = v
        # pre save callback: a non-None return value replaces the attribute.
        for k, v in self.presave.items():
            if callable(v):
                result = v(self, self.data.get(k, None))
                if result is not None:
                    self.data[k] = result
        # format: wrap scalars into lists and drop DEFAULT_NO_KEY markers.
        # Bug fix: iterate over a snapshot — we delete keys from self.data in
        # this loop, which raises RuntimeError on a live view in Python 3.
        for k, v in list(self.data.items()):
            if not isinstance(v, list):
                v = [v]
                self.data[k] = v
            if v[0] is DEFAULT_NO_KEY or isinstance(v[0], DEFAULT_NO_KEY):
                del self.data[k]
        # not overwrite and exists
        if not overwrite and self.pk in self.__class__.objects:
            msg = 'error creating a LDAP directory entry: Already exists'
            raise RuntimeError(msg)
        self.__class__.objects[self.pk] = self
class UserEnt(MockEnt):
    # Mock of a libuser *user* entry.  The ``s_*`` presave hooks below follow
    # the MockEnt.save() contract: returning a non-None value replaces the
    # attribute; returning None leaves the stored value untouched.
    pk = USERNAME
    # Attributes every saved user gets unless already set; commented-out keys
    # are intentionally absent from new entries.
    default = {
        # USERNAME: None,
        USERPASSWORD: '{CRYPT}!!',
        UIDNUMBER: None,
        GIDNUMBER: None,
        GECOS: None,
        # HOMEDIRECTORY: None,
        LOGINSHELL: '/bin/bash/',
        # SHADOWPASSWORD: None,
        SHADOWLASTCHANGE: None,
        SHADOWMIN: None,
        SHADOWMAX: None,
        SHADOWWARNING: None,
        SHADOWINACTIVE: None,
        SHADOWEXPIRE: None,
        SHADOWFLAG: None,
        COMMONNAME: None,
        # SN: None,
        # ROOMNUMBER: None,
        # TELEPHONENUMBER: None,
        # HOMEPHONE: None,
        # EMAIL: None,
    }

    def s_USERNAME(self, value):
        # Default USERNAME to the entry's primary key.
        if not value:
            return self.pk

    def s_UIDNUMBER(self, value):
        # Default the UID to a pseudo-random number derived from the clock's
        # fractional seconds.
        if not value:
            return int(time.time() % 1 * 10000000)

    def s_GIDNUMBER(self, value):
        # XXX if create same name group ??
        if not value:
            return int(time.time() % 1 * 10000000)

    def s_GIVENNAME(self, value):
        # e[libuser.SN] required by inetOrgPerson schema, but not provided
        if (GIVENNAME in self.data) and (SN not in self.data):
            raise RuntimeError

    def s_GECOS(self, value):
        # Parse a "common name,building,office phone,home phone" GECOS value
        # and copy the common-name component into COMMONNAME.  Always ends up
        # returning None, so GECOS itself is stored unchanged by save().
        if isinstance(value, list):
            value = value[0]
        if value:
            attrs = value.split(',') if (value and ',' in value) else []
            attrs.extend([None] * (4 - len(attrs)))
            common_name, building, phone_office, phone_home = attrs
            if common_name is not None:
                self.data[COMMONNAME] = common_name
        else:
            return None

    def s_SHADOWPASSWORD(self, value):
        # SHADOWPASSWORD is never persisted; drop it if present.
        self.data.pop(SHADOWPASSWORD, None)

    def s_HOMEDIRECTORY(self, value):
        # testUserAdd5
        # Default the home directory from the user name; names starting with
        # '.' are rejected.
        if value is None:
            if self.pk[0] == '.':
                raise RuntimeError
            else:
                return '/home/' + self.pk

    # Maps each attribute to the hook run for it by MockEnt.save().
    presave = {
        USERNAME: s_USERNAME,
        UIDNUMBER: s_UIDNUMBER,
        GIDNUMBER: s_GIDNUMBER,
        GIVENNAME: s_GIVENNAME,
        GECOS: s_GECOS,
        SHADOWPASSWORD: s_SHADOWPASSWORD,
        HOMEDIRECTORY: s_HOMEDIRECTORY
    }
class GroupEnt(MockEnt):
    """Mock of a libuser *group* entry.

    The ``s_*`` presave hooks follow the MockEnt.save() contract: a non-None
    return value replaces the attribute; None leaves it untouched.
    """

    pk = GROUPNAME
    default = {
        GIDNUMBER: None,
        GROUPPASSWORD: DEFAULT_NO_KEY,
    }

    def s_GROUPNAME(self, value):
        # Default GROUPNAME to the entry's primary key.
        if not value:
            return self.pk

    def s_MEMBERNAME(self, value):
        # Normalize MEMBERNAME to a list of member names.
        membername = self.data.get(MEMBERNAME, None)
        # Bug fix: the original tested ``basestring``, which exists only on
        # Python 2 and raises NameError under Python 3.
        if isinstance(membername, str):
            membername = [membername, ]
        return membername

    def s_GIDNUMBER(self, value):
        # Default the GID to a pseudo-random number derived from the clock's
        # fractional seconds.
        if not value:
            return int(time.time() % 1 * 10000000)

    def s_GROUPPASSWORD(self, value):
        """If USERPASSWORD was set on the group, mirror it into GROUPPASSWORD;
        if no password was set at all, the key must not exist (signalled to
        save() with DEFAULT_NO_KEY)."""
        if value in (None, DEFAULT_NO_KEY):
            user_pwd = self.data.get(USERPASSWORD, None)
            if user_pwd is not None:
                return user_pwd
            else:
                return DEFAULT_NO_KEY

    presave = {
        GROUPNAME: s_GROUPNAME,
        MEMBERNAME: s_MEMBERNAME,
        GIDNUMBER: s_GIDNUMBER,
        # USERPASSWORD: d_G,
        GROUPPASSWORD: s_GROUPPASSWORD,  # default no key
    }
class MockLibuserAdmin(object):
    """In-memory stand-in for the object returned by ``libuser.admin()``.

    All state lives in the per-class ``objects`` registries of UserEnt and
    GroupEnt; no real system accounts are touched.  Lookups return entries via
    deepcopy(), which MockEnt deliberately makes identity-preserving.
    """

    pk = USERNAME

    def __init__(self):
        pass

    def initUser(self, name):
        # Create (or re-init) an unsaved user entry.
        return UserEnt(name)

    def initGroup(self, name):
        # Create (or re-init) an unsaved group entry.
        return GroupEnt(name)

    def addUser(self, ent, create_home=True, create_mail_spool=True):
        # overwrite=False makes save() raise if the user already exists.
        ent.save(overwrite=False)
        return 1

    def addGroup(self, ent):
        ent.save(overwrite=False)
        return 1

    def setpassUser(self, ent, password, use_crypt=True):
        return self._setpassEnt(USERPASSWORD, ent, password, use_crypt)

    def setpassGroup(self, ent, password, use_crypt=True):
        return self._setpassEnt(GROUPPASSWORD, ent, password, use_crypt)

    def _setpassEnt(self, password_attr, ent, password, use_crypt):
        # Store the password with a '{CRYPT}' prefix; single-valued password
        # attributes are replaced, two-valued ones get their second slot set.
        if not use_crypt:  # ...
            pass
            # password = crypt(password)
        if password_attr not in ent:
            ent[password_attr] = [None]
        if len(ent[password_attr]) == 1:
            ent[password_attr] = ['{CRYPT}' + password]
        elif len(ent[password_attr]) == 2:
            ent[password_attr][1] = '{CRYPT}' + password
        else:
            raise Exception('What ?')
        ent.save()

    def lookupUserByName(self, name):
        for i in UserEnt.objects.values():
            if i[USERNAME] == [name]:
                # Bug fix: a stray, result-discarding ``deepcopy(i)``
                # statement before this return was removed (dead code).
                return deepcopy(i)
        return None

    def lookupGroupByName(self, name):
        for i in GroupEnt.objects.values():
            if i[GROUPNAME] == [name]:
                return deepcopy(i)
        return None

    def enumerateUsersFull(self, name=None):
        # Full entries; ``name`` is a shell-style pattern matched with fnmatch.
        return [
            deepcopy(v)
            for k, v in UserEnt.objects.items()
            if (name is None) or fnmatch(k, name)
        ]

    def enumerateUsers(self, name=None):
        # User names only.
        return [
            deepcopy(i)
            for i in UserEnt.objects
            if (name is None) or fnmatch(i, name)
        ]

    def enumerateGroupsFull(self, name=None):
        return [
            deepcopy(v)
            for k, v in GroupEnt.objects.items()
            if (name is None) or fnmatch(k, name)
        ]

    def enumerateGroups(self, name=None):
        return [
            deepcopy(i)
            for i in GroupEnt.objects
            if (name is None) or fnmatch(i, name)
        ]

    def enumerateGroupsByUserFull(self, name):
        # Groups whose GID matches the user's, plus groups listing the user as
        # a member.  Returns the stored entries themselves (no copies).
        user = self.lookupUserByName(name)
        gid = user[GIDNUMBER]
        return [
            i
            for i in GroupEnt.objects.values()
            if (i[GIDNUMBER] == gid) or (name in i.get(MEMBERNAME, []))
        ]

    def enumerateGroupsByUser(self, name):
        return [i[GROUPNAME][0]
                for i in self.enumerateGroupsByUserFull(name)]

    def lookupUserById(self, id):
        for i in UserEnt.objects.values():
            if i[UIDNUMBER] == [id]:
                return deepcopy(i)
        return None

    def lookupGroupById(self, id):
        for i in GroupEnt.objects.values():
            if i[GIDNUMBER] == [id]:
                return deepcopy(i)
        return None

    def enumerateUsersByGroupFull(self, name):
        group = self.lookupGroupByName(name)
        gid = group[GIDNUMBER]
        users_gid_match = [
            deepcopy(i)
            for i in UserEnt.objects.values()
            if i[GIDNUMBER] == gid
        ]
        users_member_match = [
            self.lookupUserByName(i) for i in group.get(MEMBERNAME, [])
        ]
        # remove repeated
        users = {i.pk: i for i in (users_gid_match + users_member_match)}
        return users.values()

    def enumerateUsersByGroup(self, name):
        return [i[USERNAME][0]
                for i in self.enumerateUsersByGroupFull(name)]

    def modifyUser(self, ent, renew_home=False):
        self._modifyEnt(ent)

    def modifyGroup(self, ent):
        self._modifyEnt(ent)

    def _modifyEnt(self, ent):
        # new pk attr value != current pk value: the entry was renamed, so it
        # must be re-registered under its new key.
        old_pk = ent.pk
        new_pk = ent[ent.__class__.pk][0]
        if new_pk != old_pk:
            ent.pk = new_pk
            if new_pk in ent.objects:  # other exists
                raise RuntimeError
            else:
                del ent.objects[old_pk]  # remove old
        ent.save()

    def deleteUser(self, ent, remove_hone=False, remove_mail_spool=False):
        # (sic: "remove_hone" kept for API compatibility with callers.)
        del UserEnt.objects[ent.pk]

    def deleteGroup(self, ent):
        if not hasattr(ent, 'pk'):
            return True
        del GroupEnt.objects[ent.pk]
        return True

    def removepassUser(self, ent):
        self._removepassEnt(USERPASSWORD, ent)

    def removepassGroup(self, ent):
        self._removepassEnt(GROUPPASSWORD, ent)

    def _removepassEnt(self, password_attr, ent):
        # Reset the password to the bare '{CRYPT}' marker.
        if len(ent[password_attr]) == 1:
            ent[password_attr] = '{CRYPT}'
        elif len(ent[password_attr]) == 2:
            if '{CRYPT}' in ent[password_attr][1]:
                ent[password_attr][1] = '{CRYPT}'
            else:
                ent[password_attr] = ['{CRYPT}']
        else:
            raise Exception('What ?')
        ent[SHADOWLASTCHANGE] = 10000 + 1  # testUserRemovepass1
        ent.save()

    def lockUser(self, ent):
        return self._lockEnt(USERPASSWORD, ent)

    def lockGroup(self, ent):
        return self._lockEnt(GROUPPASSWORD, ent)

    def _lockEnt(self, password_attr, ent):
        # Normalize so exactly one '!' follows '{CRYPT}' (the locked form).
        password = ent[password_attr][0]
        if '{CRYPT}' not in password:
            raise RuntimeError
        password = password.replace('{CRYPT}!', '{CRYPT}')
        password = password.replace('{CRYPT}', '{CRYPT}!')
        ent[password_attr] = password
        ent.save()

    def unlockUser(self, ent, empty_passwrod=False):
        return self._unlockEnt(USERPASSWORD, ent, empty_passwrod)

    def unlockGroup(self, ent, empty_passwrod=False):
        return self._unlockEnt(GROUPPASSWORD, ent, empty_passwrod)

    def _unlockEnt(self, password_attr, ent, empty_passwrod=False):
        # Remove the '!' lock marker; with empty_passwrod, refuse to unlock an
        # entry whose password would become empty.
        password = ent[password_attr][0]
        if '{CRYPT}' not in password:
            raise RuntimeError
        if empty_passwrod:
            if password == '{CRYPT}!':
                raise RuntimeError
            password = password.replace('{CRYPT}!', '{CRYPT}')
        else:
            password = password.replace('{CRYPT}!', '{CRYPT}')
        ent[password_attr] = password
        ent.save()

    def userIsLocked(self, ent):
        password = ent[USERPASSWORD][0]
        return '{CRYPT}!' in password

    def groupIsLocked(self, ent):
        password = ent[GROUPPASSWORD][0]
        return '{CRYPT}!' in password
# Create the per-subclass ``objects`` registries for UserEnt / GroupEnt.
MockEnt.init_subclass()
# Single shared admin instance, mirroring what libuser.admin() hands out.
mock_admin = MockLibuserAdmin()
def admin(prompt=None):
    # Drop-in replacement for ``libuser.admin``; ``prompt`` is ignored.
    return mock_admin
| antilles-core/openHPC_web_project/tests/user/mock_libuser.py | 13,314 | if set USERPASSWORD of group GROUPPASSWORD same as it
if not any value set, key should not exists
Copyright © 2019-present Lenovo
This file is licensed under both the BSD-3 license for individual/non-commercial use and
EPL-1.0 license for commercial use. Full text of both licenses can be found in
COPYING.BSD and COPYING.EPL files.
-*- coding: utf-8 -*- Consts a const if no pk set default pre save callback format not overwrite and exists USERNAME: None, HOMEDIRECTORY: None, SHADOWPASSWORD: None, SN: None, ROOMNUMBER: None, TELEPHONENUMBER: None, HOMEPHONE: None, EMAIL: None, XXX if create same name group ?? e[libuser.SN] required by inetOrgPerson schema, but not provided testUserAdd5 USERPASSWORD: d_G, default no key ... password = crypt(password) remove repeated new pk attr value != current pk value other exists remove old testUserRemovepass1 | 868 | en | 0.619667 |
# Generated by Django 1.11.24 on 2019-10-16 22:48
from typing import Any, Set, Union
import ujson
from django.conf import settings
from django.contrib.auth.hashers import check_password, make_password
from django.db import migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.utils.timezone import now as timezone_now
from zerver.lib.cache import cache_delete, user_profile_by_api_key_cache_key
from zerver.lib.queue import queue_json_publish
from zerver.lib.utils import generate_api_key
def ensure_no_empty_passwords(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
    """With CVE-2019-18933, it was possible for certain users created
    using social login (e.g. Google/GitHub auth) to have the empty
    string as their password in the Zulip database, rather than
    Django's "unusable password" (i.e. no password at all). This was a
    serious security issue for organizations with both password and
    Google/GitHub authentication enabled.

    Combined with the code changes to prevent new users from entering
    this buggy state, this migration sets the intended "no password"
    state for any users who are in this buggy state, as had been
    intended.

    While this bug was discovered by our own development team and we
    believe it hasn't been exploited in the wild, out of an abundance
    of caution, this migration also resets the personal API keys for
    all users where Zulip's database-level logging cannot **prove**
    that user's current personal API key was never accessed using this
    bug.

    There are a few ways this can be proven: (1) the user's password
    has never been changed and is not the empty string,
    or (2) the user's personal API key has changed since that user last
    changed their password (which is not ''). Both constitute proof
    because this bug cannot be used to gain the access required to change
    or reset a user's password.

    Resetting those API keys has the effect of logging many users out
    of the Zulip mobile and terminal apps unnecessarily (e.g. because
    the user changed their password at any point in the past, even
    though the user never was affected by the bug), but we're
    comfortable with that cost for ensuring that this bug is
    completely fixed.

    To avoid this inconvenience for self-hosted servers which don't
    even have EmailAuthBackend enabled, we skip resetting any API keys
    if the server doesn't have EmailAuthBackend configured.
    """
    UserProfile = apps.get_model('zerver', 'UserProfile')
    RealmAuditLog = apps.get_model('zerver', 'RealmAuditLog')

    # Because we're backporting this migration to the Zulip 2.0.x
    # series, we've given it migration number 0209, which is a
    # duplicate with an existing migration already merged into Zulip
    # master. Migration 0247_realmauditlog_event_type_to_int.py
    # changes the format of RealmAuditLog.event_type, so we need the
    # following conditional block to determine what values to use when
    # searching for the relevant events in that log.
    event_type_class = RealmAuditLog._meta.get_field('event_type').get_internal_type()
    if event_type_class == 'CharField':
        USER_PASSWORD_CHANGED: Union[int, str] = 'user_password_changed'
        USER_API_KEY_CHANGED: Union[int, str] = 'user_api_key_changed'
    else:
        USER_PASSWORD_CHANGED = 122
        USER_API_KEY_CHANGED = 127

    # First, we do some bulk queries to collect data we'll find useful
    # in the loop over all users below.

    # Users who changed their password at any time since account
    # creation. These users could theoretically have started with an
    # empty password, but set a password later via the password reset
    # flow. If their API key has changed since they changed their
    # password, we can prove their current API key cannot have been
    # exposed; we store those users in
    # password_change_user_ids_no_reset_needed.
    password_change_user_ids = set(RealmAuditLog.objects.filter(
        event_type=USER_PASSWORD_CHANGED).values_list("modified_user_id", flat=True))
    password_change_user_ids_api_key_reset_needed: Set[int] = set()
    password_change_user_ids_no_reset_needed: Set[int] = set()

    for user_id in password_change_user_ids:
        # Here, we check the timing for users who have changed
        # their password.

        # We check if the user changed their API key since their first password change.
        query = RealmAuditLog.objects.filter(
            modified_user=user_id, event_type__in=[USER_PASSWORD_CHANGED,
                                                   USER_API_KEY_CHANGED]
        ).order_by("event_time")

        earliest_password_change = query.filter(event_type=USER_PASSWORD_CHANGED).first()
        # Since these users are in password_change_user_ids, this must not be None.
        assert earliest_password_change is not None

        latest_api_key_change = query.filter(event_type=USER_API_KEY_CHANGED).last()
        if latest_api_key_change is None:
            # This user has never changed their API key. As a
            # result, even though it's very likely this user never
            # had an empty password, they have changed their
            # password, and we have no record of the password's
            # original hash, so we can't prove the user's API key
            # was never affected. We schedule this user's API key
            # to be reset.
            password_change_user_ids_api_key_reset_needed.add(user_id)
        elif earliest_password_change.event_time <= latest_api_key_change.event_time:
            # This user has changed their password before
            # generating their current personal API key, so we can
            # prove their current personal API key could not have
            # been exposed by this bug.
            password_change_user_ids_no_reset_needed.add(user_id)
        else:
            password_change_user_ids_api_key_reset_needed.add(user_id)

    if password_change_user_ids_no_reset_needed and settings.PRODUCTION:
        # We record in this log file users whose current API key was
        # generated after a real password was set, so there's no need
        # to reset their API key, but because they've changed their
        # password, we don't know whether or not they originally had a
        # buggy password.
        #
        # In theory, this list can be recalculated using the above
        # algorithm modified to only look at events before the time
        # this migration was installed, but it's helpful to log it as well.
        with open("/var/log/zulip/0209_password_migration.log", "w") as log_file:
            line = "No reset needed, but changed password: {}\n"
            log_file.write(line.format(password_change_user_ids_no_reset_needed))

    AFFECTED_USER_TYPE_EMPTY_PASSWORD = 'empty_password'
    AFFECTED_USER_TYPE_CHANGED_PASSWORD = 'changed_password'
    MIGRATION_ID = '0209_user_profile_no_empty_password'

    # Helper: record in RealmAuditLog which users this migration touched,
    # and why, with the migration id embedded in extra_data.
    def write_realm_audit_log_entry(user_profile: Any,
                                    event_time: Any, event_type: Any,
                                    affected_user_type: str) -> None:
        RealmAuditLog.objects.create(
            realm=user_profile.realm,
            modified_user=user_profile,
            event_type=event_type,
            event_time=event_time,
            extra_data=ujson.dumps({
                'migration_id': MIGRATION_ID,
                'affected_user_type': affected_user_type,
            })
        )

    # If Zulip's built-in password authentication is not enabled on
    # the server level, then we plan to skip resetting any users' API
    # keys, since the bug requires EmailAuthBackend.
    email_auth_enabled = 'zproject.backends.EmailAuthBackend' in settings.AUTHENTICATION_BACKENDS

    # A quick note: This query could in theory exclude users with
    # is_active=False, is_bot=True, or realm__deactivated=True here to
    # accessing only active human users in non-deactivated realms.
    # But it's better to just be thorough; users can be reactivated,
    # and e.g. a server admin could manually edit the database to
    # change a bot into a human user if they really wanted to. And
    # there's essentially no harm in rewriting state for a deactivated
    # account.
    for user_profile in UserProfile.objects.all():
        event_time = timezone_now()
        if check_password('', user_profile.password):
            # This user currently has the empty string as their password.

            # Change their password and record that we did so.
            user_profile.password = make_password(None)
            update_fields = ["password"]
            write_realm_audit_log_entry(user_profile, event_time,
                                        USER_PASSWORD_CHANGED,
                                        AFFECTED_USER_TYPE_EMPTY_PASSWORD)

            if email_auth_enabled and not user_profile.is_bot:
                # As explained above, if the built-in password authentication
                # is enabled, reset the API keys. We can skip bot accounts here,
                # because the `password` attribute on a bot user is useless.
                reset_user_api_key(user_profile)
                update_fields.append("api_key")

                event_time = timezone_now()
                write_realm_audit_log_entry(user_profile, event_time,
                                            USER_API_KEY_CHANGED,
                                            AFFECTED_USER_TYPE_EMPTY_PASSWORD)

            user_profile.save(update_fields=update_fields)
            continue

        elif email_auth_enabled and \
                user_profile.id in password_change_user_ids_api_key_reset_needed:
            # For these users, we just need to reset the API key.
            reset_user_api_key(user_profile)

            user_profile.save(update_fields=["api_key"])
            write_realm_audit_log_entry(user_profile, event_time,
                                        USER_API_KEY_CHANGED,
                                        AFFECTED_USER_TYPE_CHANGED_PASSWORD)
def reset_user_api_key(user_profile: Any) -> None:
    """Rotate user_profile.api_key and invalidate state tied to the old key.

    Does not save the profile; the caller is responsible for persisting the
    new key.
    """
    previous_api_key = user_profile.api_key
    user_profile.api_key = generate_api_key()
    cache_delete(user_profile_by_api_key_cache_key(previous_api_key))

    # Like with any API key change, we need to clear any server-side
    # state for sending push notifications to mobile app clients that
    # could have been registered with the old API key. Fortunately,
    # we can just write to the queue processor that handles sending
    # those notices to the push notifications bouncer service.
    queue_json_publish("deferred_work", {
        'type': 'clear_push_device_tokens',
        'user_profile_id': user_profile.id,
    })
class Migration(migrations.Migration):
    # Run outside a single transaction: the data migration iterates every
    # UserProfile row, writing audit-log entries as it goes.
    atomic = False

    dependencies = [
        ('zerver', '0208_add_realm_night_logo_fields'),
    ]

    operations = [
        # Forward: scrub empty passwords / reset exposed API keys.
        # Reverse: no-op (the forward operation is not meaningfully
        # reversible).
        migrations.RunPython(ensure_no_empty_passwords,
                             reverse_code=migrations.RunPython.noop),
    ]
| zerver/migrations/0209_user_profile_no_empty_password.py | 11,210 | With CVE-2019-18933, it was possible for certain users created
using social login (e.g. Google/GitHub auth) to have the empty
string as their password in the Zulip database, rather than
Django's "unusable password" (i.e. no password at all). This was a
serious security issue for organizations with both password and
Google/GitHub authentication enabled.
Combined with the code changes to prevent new users from entering
this buggy state, this migration sets the intended "no password"
state for any users who are in this buggy state, as had been
intended.
While this bug was discovered by our own development team and we
believe it hasn't been exploited in the wild, out of an abundance
of caution, this migration also resets the personal API keys for
all users where Zulip's database-level logging cannot **prove**
that user's current personal API key was never accessed using this
bug.
There are a few ways this can be proven: (1) the user's password
has never been changed and is not the empty string,
or (2) the user's personal API key has changed since that user last
changed their password (which is not ''). Both constitute proof
because this bug cannot be used to gain the access required to change
or reset a user's password.
Resetting those API keys has the effect of logging many users out
of the Zulip mobile and terminal apps unnecessarily (e.g. because
the user changed their password at any point in the past, even
though the user never was affected by the bug), but we're
comfortable with that cost for ensuring that this bug is
completely fixed.
To avoid this inconvenience for self-hosted servers which don't
even have EmailAuthBackend enabled, we skip resetting any API keys
if the server doesn't have EmailAuthBackend configured.
Generated by Django 1.11.24 on 2019-10-16 22:48 Because we're backporting this migration to the Zulip 2.0.x series, we've given it migration number 0209, which is a duplicate with an existing migration already merged into Zulip master. Migration 0247_realmauditlog_event_type_to_int.py changes the format of RealmAuditLog.event_type, so we need the following conditional block to determine what values to use when searching for the relevant events in that log. First, we do some bulk queries to collect data we'll find useful in the loop over all users below. Users who changed their password at any time since account creation. These users could theoretically have started with an empty password, but set a password later via the password reset flow. If their API key has changed since they changed their password, we can prove their current API key cannot have been exposed; we store those users in password_change_user_ids_no_reset_needed. Here, we check the timing for users who have changed their password. We check if the user changed their API key since their first password change. Since these users are in password_change_user_ids, this must not be None. This user has never changed their API key. As a result, even though it's very likely this user never had an empty password, they have changed their password, and we have no record of the password's original hash, so we can't prove the user's API key was never affected. We schedule this user's API key to be reset. This user has changed their password before generating their current personal API key, so we can prove their current personal API key could not have been exposed by this bug. We record in this log file users whose current API key was generated after a real password was set, so there's no need to reset their API key, but because they've changed their password, we don't know whether or not they originally had a buggy password. 
In theory, this list can be recalculated using the above algorithm modified to only look at events before the time this migration was installed, but it's helpful to log it as well. If Zulip's built-in password authentication is not enabled on the server level, then we plan to skip resetting any users' API keys, since the bug requires EmailAuthBackend. A quick note: This query could in theory exclude users with is_active=False, is_bot=True, or realm__deactivated=True here to accessing only active human users in non-deactivated realms. But it's better to just be thorough; users can be reactivated, and e.g. a server admin could manually edit the database to change a bot into a human user if they really wanted to. And there's essentially no harm in rewriting state for a deactivated account. This user currently has the empty string as their password. Change their password and record that we did so. As explained above, if the built-in password authentication is enabled, reset the API keys. We can skip bot accounts here, because the `password` attribute on a bot user is useless. For these users, we just need to reset the API key. Like with any API key change, we need to clear any server-side state for sending push notifications to mobile app clients that could have been registered with the old API key. Fortunately, we can just write to the queue processor that handles sending those notices to the push notifications bouncer service. | 5,122 | en | 0.954521 |
from flask import g
import logging
from datetime import datetime
import config
def get_logger(name):
    # type: (str) -> logging.Logger
    """Return a named logger set to the global level from ``config``.

    ``logging.basicConfig()`` is a no-op once the root logger has handlers,
    so calling this repeatedly is safe; records propagate to the root
    handler that basicConfig installs.

    Bug fix: the original also built a StreamHandler and Formatter that were
    never attached (the ``addHandler`` call was commented out) — that dead
    code has been removed.
    """
    logging.basicConfig()
    logger = logging.getLogger(name)
    logger.setLevel(config.GLOBAL_LOGGING_LEVEL)
    return logger


logger = get_logger('util')
def get_db_client(conn_pool, *args, **kws):
    """Return the request-scoped DB connection from flask's ``g``, taking a
    fresh one from ``conn_pool`` on first use within the request."""
    logger.debug("Getting DB Connection")
    if 'db' in g:
        return g.db
    logger.debug("Creating new DB connection")
    g.db = conn_pool.get()
    return g.db
def teardown_db(conn_pool):
    """Return the request's DB connection (if one was created) to ``conn_pool``."""
    connection = g.pop('db', None)
    if connection is None:
        return
    conn_pool.put(connection)
# coding: utf-8
"""
TheTVDB API v2
API v3 targets v2 functionality with a few minor additions. The API is accessible via https://api.thetvdb.com and provides the following REST endpoints in JSON format. How to use this API documentation ---------------- You may browse the API routes without authentication, but if you wish to send requests to the API and see response data, then you must authenticate. 1. Obtain a JWT token by `POST`ing to the `/login` route in the `Authentication` section with your API key and credentials. 1. Paste the JWT token from the response into the \"JWT Token\" field at the top of the page and click the 'Add Token' button. You will now be able to use the remaining routes to send requests to the API and get a response. Language Selection ---------------- Language selection is done via the `Accept-Language` header. At the moment, you may only pass one language abbreviation in the header at a time. Valid language abbreviations can be found at the `/languages` route.. Authentication ---------------- Authentication to use the API is similar to the How-to section above. Users must `POST` to the `/login` route with their API key and credentials in the following format in order to obtain a JWT token. `{\"apikey\":\"APIKEY\",\"username\":\"USERNAME\",\"userkey\":\"USERKEY\"}` Note that the username and key are ONLY required for the `/user` routes. The user's key is labled `Account Identifier` in the account section of the main site. The token is then used in all subsequent requests by providing it in the `Authorization` header. The header will look like: `Authorization: Bearer <yourJWTtoken>`. Currently, the token expires after 24 hours. You can `GET` the `/refresh_token` route to extend that expiration date. Versioning ---------------- You may request a different version of the API by including an `Accept` header in your request with the following format: `Accept:application/vnd.thetvdb.v$VERSION`. 
This documentation automatically uses the version seen at the top and bottom of the page. # noqa: E501
OpenAPI spec version: 3.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Movie(object):
    """Swagger model for a TheTVDB movie record.

    Originally emitted by swagger-codegen; the public interface
    (constructor keywords, properties, ``to_dict``/``to_str``/``__repr__``,
    equality helpers and the ``swagger_types``/``attribute_map`` tables)
    is unchanged.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """

    swagger_types = {
        'artworks': 'list[MovieArtwork]',
        'genres': 'list[MovieGenre]',
        'id': 'int',
        'people': 'MoviePeople',
        'release_dates': 'list[MovieReleaseDate]',
        'remoteids': 'list[MovieRemoteId]',
        'runtime': 'int',
        'trailers': 'list[MovieTrailer]',
        'translations': 'list[MovieTranslation]',
        'url': 'str'
    }

    attribute_map = {
        'artworks': 'artworks',
        'genres': 'genres',
        'id': 'id',
        'people': 'people',
        'release_dates': 'release_dates',
        'remoteids': 'remoteids',
        'runtime': 'runtime',
        'trailers': 'trailers',
        'translations': 'translations',
        'url': 'url'
    }

    def __init__(self, artworks=None, genres=None, id=None, people=None, release_dates=None, remoteids=None, runtime=None, trailers=None, translations=None, url=None):  # noqa: E501
        """Movie - a model defined in Swagger"""  # noqa: E501
        supplied = {
            'artworks': artworks,
            'genres': genres,
            'id': id,
            'people': people,
            'release_dates': release_dates,
            'remoteids': remoteids,
            'runtime': runtime,
            'trailers': trailers,
            'translations': translations,
            'url': url,
        }
        # Start every backing field off as None, in declaration order, so
        # instances compare equal regardless of which keywords were passed.
        for name in self.swagger_types:
            setattr(self, '_' + name, None)
        self.discriminator = None
        # Route any supplied values through the property setters.
        for name, value in supplied.items():
            if value is not None:
                setattr(self, name, value)

    @property
    def artworks(self):
        """list[MovieArtwork]: artwork entries attached to this movie."""
        return self._artworks

    @artworks.setter
    def artworks(self, artworks):
        """Set the artworks of this Movie."""
        self._artworks = artworks

    @property
    def genres(self):
        """list[MovieGenre]: genres assigned to this movie."""
        return self._genres

    @genres.setter
    def genres(self, genres):
        """Set the genres of this Movie."""
        self._genres = genres

    @property
    def id(self):
        """int: TheTVDB identifier of this movie."""
        return self._id

    @id.setter
    def id(self, id):
        """Set the id of this Movie."""
        self._id = id

    @property
    def people(self):
        """MoviePeople: cast and crew information for this movie."""
        return self._people

    @people.setter
    def people(self, people):
        """Set the people of this Movie."""
        self._people = people

    @property
    def release_dates(self):
        """list[MovieReleaseDate]: release dates for this movie."""
        return self._release_dates

    @release_dates.setter
    def release_dates(self, release_dates):
        """Set the release_dates of this Movie."""
        self._release_dates = release_dates

    @property
    def remoteids(self):
        """list[MovieRemoteId]: identifiers of this movie on other services."""
        return self._remoteids

    @remoteids.setter
    def remoteids(self, remoteids):
        """Set the remoteids of this Movie."""
        self._remoteids = remoteids

    @property
    def runtime(self):
        """int: runtime of this movie."""
        return self._runtime

    @runtime.setter
    def runtime(self, runtime):
        """Set the runtime of this Movie."""
        self._runtime = runtime

    @property
    def trailers(self):
        """list[MovieTrailer]: trailers for this movie."""
        return self._trailers

    @trailers.setter
    def trailers(self, trailers):
        """Set the trailers of this Movie."""
        self._trailers = trailers

    @property
    def translations(self):
        """list[MovieTranslation]: translated titles/overviews."""
        return self._translations

    @translations.setter
    def translations(self, translations):
        """Set the translations of this Movie."""
        self._translations = translations

    @property
    def url(self):
        """str: canonical URL of this movie."""
        return self._url

    @url.setter
    def url(self, url):
        """Set the url of this Movie."""
        self._url = url

    def to_dict(self):
        """Return the model's properties as a plain dict (recursing into
        nested models and containers)."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        # Generated models may subclass dict; merge those items too.
        if issubclass(Movie, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Movies compare equal when every attribute matches."""
        if isinstance(other, Movie):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| tvdb_api/models/movie.py | 10,825 | NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Returns true if both objects are equal
Movie - a model defined in Swagger
Returns true if both objects are not equal
For `print` and `pprint`
Gets the artworks of this Movie. # noqa: E501
:return: The artworks of this Movie. # noqa: E501
:rtype: list[MovieArtwork]
Sets the artworks of this Movie.
:param artworks: The artworks of this Movie. # noqa: E501
:type: list[MovieArtwork]
Gets the genres of this Movie. # noqa: E501
:return: The genres of this Movie. # noqa: E501
:rtype: list[MovieGenre]
Sets the genres of this Movie.
:param genres: The genres of this Movie. # noqa: E501
:type: list[MovieGenre]
Gets the id of this Movie. # noqa: E501
:return: The id of this Movie. # noqa: E501
:rtype: int
Sets the id of this Movie.
:param id: The id of this Movie. # noqa: E501
:type: int
Gets the people of this Movie. # noqa: E501
:return: The people of this Movie. # noqa: E501
:rtype: MoviePeople
Sets the people of this Movie.
:param people: The people of this Movie. # noqa: E501
:type: MoviePeople
Gets the release_dates of this Movie. # noqa: E501
:return: The release_dates of this Movie. # noqa: E501
:rtype: list[MovieReleaseDate]
Sets the release_dates of this Movie.
:param release_dates: The release_dates of this Movie. # noqa: E501
:type: list[MovieReleaseDate]
Gets the remoteids of this Movie. # noqa: E501
:return: The remoteids of this Movie. # noqa: E501
:rtype: list[MovieRemoteId]
Sets the remoteids of this Movie.
:param remoteids: The remoteids of this Movie. # noqa: E501
:type: list[MovieRemoteId]
Gets the runtime of this Movie. # noqa: E501
:return: The runtime of this Movie. # noqa: E501
:rtype: int
Sets the runtime of this Movie.
:param runtime: The runtime of this Movie. # noqa: E501
:type: int
Returns the model properties as a dict
Returns the string representation of the model
Gets the trailers of this Movie. # noqa: E501
:return: The trailers of this Movie. # noqa: E501
:rtype: list[MovieTrailer]
Sets the trailers of this Movie.
:param trailers: The trailers of this Movie. # noqa: E501
:type: list[MovieTrailer]
Gets the translations of this Movie. # noqa: E501
:return: The translations of this Movie. # noqa: E501
:rtype: list[MovieTranslation]
Sets the translations of this Movie.
:param translations: The translations of this Movie. # noqa: E501
:type: list[MovieTranslation]
Gets the url of this Movie. # noqa: E501
:return: The url of this Movie. # noqa: E501
:rtype: str
Sets the url of this Movie.
:param url: The url of this Movie. # noqa: E501
:type: str
TheTVDB API v2
API v3 targets v2 functionality with a few minor additions. The API is accessible via https://api.thetvdb.com and provides the following REST endpoints in JSON format. How to use this API documentation ---------------- You may browse the API routes without authentication, but if you wish to send requests to the API and see response data, then you must authenticate. 1. Obtain a JWT token by `POST`ing to the `/login` route in the `Authentication` section with your API key and credentials. 1. Paste the JWT token from the response into the "JWT Token" field at the top of the page and click the 'Add Token' button. You will now be able to use the remaining routes to send requests to the API and get a response. Language Selection ---------------- Language selection is done via the `Accept-Language` header. At the moment, you may only pass one language abbreviation in the header at a time. Valid language abbreviations can be found at the `/languages` route.. Authentication ---------------- Authentication to use the API is similar to the How-to section above. Users must `POST` to the `/login` route with their API key and credentials in the following format in order to obtain a JWT token. `{"apikey":"APIKEY","username":"USERNAME","userkey":"USERKEY"}` Note that the username and key are ONLY required for the `/user` routes. The user's key is labled `Account Identifier` in the account section of the main site. The token is then used in all subsequent requests by providing it in the `Authorization` header. The header will look like: `Authorization: Bearer <yourJWTtoken>`. Currently, the token expires after 24 hours. You can `GET` the `/refresh_token` route to extend that expiration date. Versioning ---------------- You may request a different version of the API by including an `Accept` header in your request with the following format: `Accept:application/vnd.thetvdb.v$VERSION`. This documentation automatically uses the version seen at the top and bottom of the page. # noqa: E501
OpenAPI spec version: 3.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
coding: utf-8 noqa: F401 noqa: E501 noqa: E501 | 4,867 | en | 0.730315 |
import math
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
class CartPoleEnv(gym.Env):
    """
    Description:
        A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The pendulum starts upright, and the goal is to prevent it from falling over by increasing and reducing the cart's velocity.
    Source:
        This environment corresponds to the version of the cart-pole problem described by Barto, Sutton, and Anderson
    Observation:
        Type: Box(4)
        Num Observation             Min       Max
        0   Cart Position           -4.8      4.8
        1   Cart Velocity           -Inf      Inf
        2   Pole Angle              -24 deg   24 deg
        3   Pole Velocity At Tip    -Inf      Inf
    Actions:
        NOTE(review): although the classic description below says Discrete(2),
        this variant declares a continuous Box(1) action in [-1, 1] that is
        scaled by force_mag in step() -- confirm with callers.
        Type: Discrete(2)
        Num Action
        0   Push cart to the left
        1   Push cart to the right
        Note: The amount the velocity that is reduced or increased is not fixed; it depends on the angle the pole is pointing. This is because the center of gravity of the pole increases the amount of energy needed to move the cart underneath it
    Reward:
        Reward is 1 for every step taken, including the termination step
    Starting State:
        All observations are assigned a uniform random value in [-0.05..0.05]
    Episode Termination:
        Pole Angle is more than 12 degrees
        Cart Position is more than 2.4 (center of the cart reaches the edge of the display)
        Episode length is greater than 200
    Solved Requirements
        Considered solved when the average reward is greater than or equal to 195.0 over 100 consecutive trials.
    """

    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second' : 50
    }

    def __init__(self, force=10.0, length=0.5, mass=0.1):
        """Create the environment.

        force/length/mass may each be either a scalar, or a list of
        [low, high] intervals from which a fresh value is sampled on
        every reset() (see _sample_parameter).
        """
        self.gravity = 9.8
        self.masscart = 1.0
        self.tau = 0.02  # seconds between state updates
        self.kinematics_integrator = 'euler'
        # When a list is given, the concrete value is only assigned at
        # reset() time by _sample_parameter(); the *_set attribute holds
        # the candidate intervals.
        if isinstance(force, list):
            self.force_mag_set = force
        else:
            self.force_mag_set = None
            self.force_mag = force
        if isinstance(length, list):
            self.length_set = length
        else:
            self.length_set = None
            self.length = length
        if isinstance(mass, list):
            self.masspole_set = mass
        else:
            self.masspole_set = None
            self.masspole = mass

        # Angle at which to fail the episode
        self.theta_threshold_radians = 12 * 2 * math.pi / 360
        self.x_threshold = 2.4

        # Angle limit set to 2 * theta_threshold_radians so failing observation is still within bounds
        high = np.array([
            self.x_threshold * 2,
            np.finfo(np.float32).max,
            self.theta_threshold_radians * 2,
            np.finfo(np.float32).max])

        # Continuous action in [-1, 1]; scaled by force_mag inside step().
        self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(1,), dtype=np.float32)
        self.observation_space = spaces.Box(-high, high, dtype=np.float32)

        self.seed()
        self.viewer = None
        self.state = None
        self.steps_beyond_done = None
        self.steps_in_episode = 0

    def _sample_parameter(self):
        """Draw new physics parameters from the configured ranges (if any)
        and refresh the derived quantities used by step()."""
        if self.force_mag_set is not None:
            # Pick one [low, high] interval, then sample uniformly inside it.
            set_index = self.np_random.randint(len(self.force_mag_set))
            self.force_mag = self.np_random.uniform(
                self.force_mag_set[set_index][0],
                self.force_mag_set[set_index][1])
        if self.length_set is not None:
            set_index = self.np_random.randint(len(self.length_set))
            self.length = self.np_random.uniform(self.length_set[set_index][0],
                                                 self.length_set[set_index][1])
        if self.masspole_set is not None:
            set_index = self.np_random.randint(len(self.masspole_set))
            self.masspole = self.np_random.uniform(
                self.masspole_set[set_index][0],
                self.masspole_set[set_index][1])
        # Derived quantities used by the dynamics in step().
        self.polemass_length = (self.masspole * self.length)
        self.total_mass = (self.masspole + self.masscart)

    def seed(self, seed=None):
        """Seed the environment's RNG; returns the list of seeds used."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def step(self, action):
        """Advance the simulation one tau-second tick.

        action: array-like of length 1 in [-1, 1]; scaled by force_mag.
        Returns (observation, reward, done, info) per the gym API; note
        that _sample_parameter() must have run (via reset()) first.
        """
        state = self.state
        x, x_dot, theta, theta_dot = state
        force = self.force_mag * action[0]
        costheta = math.cos(theta)
        sintheta = math.sin(theta)
        # Equations of motion for the cart-pole system (Barto/Sutton/Anderson).
        temp = (force + self.polemass_length * theta_dot * theta_dot * sintheta) / self.total_mass
        thetaacc = (self.gravity * sintheta - costheta* temp) / (self.length * (4.0/3.0 - self.masspole * costheta * costheta / self.total_mass))
        xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass
        if self.kinematics_integrator == 'euler':
            x = x + self.tau * x_dot
            x_dot = x_dot + self.tau * xacc
            theta = theta + self.tau * theta_dot
            theta_dot = theta_dot + self.tau * thetaacc
        else: # semi-implicit euler
            x_dot = x_dot + self.tau * xacc
            x = x + self.tau * x_dot
            theta_dot = theta_dot + self.tau * thetaacc
            theta = theta + self.tau * theta_dot
        self.state = (x,x_dot,theta,theta_dot)
        # Episode terminates when the cart leaves the track or the pole
        # tilts past the angle threshold.
        done = x < -self.x_threshold \
            or x > self.x_threshold \
            or theta < -self.theta_threshold_radians \
            or theta > self.theta_threshold_radians
        done = bool(done)

        self.steps_in_episode += 1

        if not done:
            reward = 1.0
        elif self.steps_beyond_done is None:
            # Pole just fell!
            self.steps_beyond_done = 0
            reward = 1.0
        else:
            if self.steps_beyond_done == 0:
                logger.warn("You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.")
            self.steps_beyond_done += 1
            reward = 0.0

        info = {}
        # 'success' mirrors the classic CartPole solved threshold (195 steps).
        info['success'] = self.steps_in_episode >= 195

        return np.array(self.state), reward, done, info

    def reset(self):
        """Re-sample physics parameters and return a fresh observation."""
        self._sample_parameter()
        self.state = self.np_random.uniform(low=-0.05, high=0.05, size=(4,))
        self.steps_beyond_done = None
        self.steps_in_episode = 0
        return np.array(self.state)

    def render(self, mode='human'):
        """Render the scene; returns an RGB array when mode=='rgb_array'."""
        screen_width = 600
        screen_height = 400

        world_width = self.x_threshold*2
        scale = screen_width/world_width
        carty = 100 # TOP OF CART
        polewidth = 10.0
        polelen = scale * (2 * self.length)
        cartwidth = 50.0
        cartheight = 30.0

        if self.viewer is None:
            # Lazily build the viewer and its geometries on first call.
            from gym.envs.classic_control import rendering
            self.viewer = rendering.Viewer(screen_width, screen_height)
            l,r,t,b = -cartwidth/2, cartwidth/2, cartheight/2, -cartheight/2
            axleoffset =cartheight/4.0
            cart = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
            self.carttrans = rendering.Transform()
            cart.add_attr(self.carttrans)
            self.viewer.add_geom(cart)
            l,r,t,b = -polewidth/2,polewidth/2,polelen-polewidth/2,-polewidth/2
            pole = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
            pole.set_color(.8,.6,.4)
            self.poletrans = rendering.Transform(translation=(0, axleoffset))
            pole.add_attr(self.poletrans)
            pole.add_attr(self.carttrans)
            self.viewer.add_geom(pole)
            self.axle = rendering.make_circle(polewidth/2)
            self.axle.add_attr(self.poletrans)
            self.axle.add_attr(self.carttrans)
            self.axle.set_color(.5,.5,.8)
            self.viewer.add_geom(self.axle)
            self.track = rendering.Line((0,carty), (screen_width,carty))
            self.track.set_color(0,0,0)
            self.viewer.add_geom(self.track)

            self._pole_geom = pole

        if self.state is None: return None

        # Edit the pole polygon vertex
        # (pole length may have changed since the last reset).
        pole = self._pole_geom
        l,r,t,b = -polewidth/2,polewidth/2,polelen-polewidth/2,-polewidth/2
        pole.v = [(l,b), (l,t), (r,t), (r,b)]

        x = self.state
        cartx = x[0]*scale+screen_width/2.0 # MIDDLE OF CART
        self.carttrans.set_translation(cartx, carty)
        self.poletrans.set_rotation(-x[2])

        return self.viewer.render(return_rgb_array = mode=='rgb_array')

    def close(self):
        """Dispose of the viewer, if one was created."""
        if self.viewer:
            self.viewer.close()
            self.viewer = None
| configurable_control_gym/envs/cartpole.py | 8,820 | Description:
A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The pendulum starts upright, and the goal is to prevent it from falling over by increasing and reducing the cart's velocity.
Source:
This environment corresponds to the version of the cart-pole problem described by Barto, Sutton, and Anderson
Observation:
Type: Box(4)
Num Observation Min Max
0 Cart Position -4.8 4.8
1 Cart Velocity -Inf Inf
2 Pole Angle -24 deg 24 deg
3 Pole Velocity At Tip -Inf Inf
Actions:
Type: Discrete(2)
Num Action
0 Push cart to the left
1 Push cart to the right
Note: The amount the velocity that is reduced or increased is not fixed; it depends on the angle the pole is pointing. This is because the center of gravity of the pole increases the amount of energy needed to move the cart underneath it
Reward:
Reward is 1 for every step taken, including the termination step
Starting State:
All observations are assigned a uniform random value in [-0.05..0.05]
Episode Termination:
Pole Angle is more than 12 degrees
Cart Position is more than 2.4 (center of the cart reaches the edge of the display)
Episode length is greater than 200
Solved Requirements
Considered solved when the average reward is greater than or equal to 195.0 over 100 consecutive trials.
seconds between state updates Angle at which to fail the episode Angle limit set to 2 * theta_threshold_radians so failing observation is still within bounds semi-implicit euler Pole just fell! TOP OF CART Edit the pole polygon vertex MIDDLE OF CART | 1,789 | en | 0.908914 |
# Copyright (c) 2015 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os

# Read the package version string from the VERSION file that ships
# alongside this module.
_version_path = os.path.join(os.path.dirname(__file__), 'VERSION')
with open(_version_path) as version_file:
    version = version_file.read().strip()
__version__ = version
| ykman/__init__.py | 1,543 | Copyright (c) 2015 Yubico AB All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 1,318 | en | 0.883634 |
#
# Spec2Vec
#
# Copyright 2019 Netherlands eScience Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numba
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy import spatial
# Add multi core parallelization
from concurrent.futures import ThreadPoolExecutor #, as_completed
# TODO better use joblib ? or dask?
def mol_sim_matrix(fingerprints1,
                   fingerprints2,
                   method='cosine',
                   filename=None,
                   max_size=1000,
                   print_progress=True):
    """Create Matrix of all molecular similarities (based on molecular fingerprints).

    If filename is not None, the result will be saved as npy.
    To create molecular fingerprints see mol_fingerprints() function from MS_functions.

    Args:
    ----
    fingerprints1: list
        List of molecular fingerprints (numpy arrays).
    fingerprints2: list
        List of molecular fingerprints (numpy arrays).
    method: str
        Method to compare molecular fingerprints. Can be 'cosine', 'dice' etc.
        (see scipy.spatial.distance.cdist).
    filename: str
        Filename to save results to. OR: If file already exists it will be
        loaded instead.
    max_size: int
        Maximum size of (sub) all-vs-all matrix to handle in one go. Will split
        up larger matrices into max_size x max_size matrices.
    print_progress: bool, optional
        If True, print phase of the run to indicate progress. Default = True.

    Returns:
    -------
    numpy array of shape (len(fingerprints1), len(fingerprints2)) with
    similarity values (similarity = 1 - distance).
    """
    if filename is not None:
        try:
            molecular_similarities = np.load(filename)
            print("Molecular similarity scores found and loaded.")
            collect_new_data = False
        except FileNotFoundError:
            print("Could not find file ", filename)
            print("Molecular scores will be calculated from scratch.")
            collect_new_data = True
    else:
        collect_new_data = True

    if collect_new_data:
        # Create array of all fingerprints
        fingerprints_arr1 = np.array(fingerprints1)
        fingerprints_arr2 = np.array(fingerprints2)

        # Calculate all-vs-all similarity matrix (similarity here = 1 - distance)
        matrix_size = (fingerprints_arr1.shape[0], fingerprints_arr2.shape[0])
        molecular_similarities = np.zeros(matrix_size)

        # Hoist the tile counts out of the loops (was recomputed per iteration).
        n_row_tiles = int(np.ceil(matrix_size[0] / max_size))
        n_col_tiles = int(np.ceil(matrix_size[1] / max_size))
        # Split large matrices up into smaller ones to track progress
        splits = n_row_tiles * n_col_tiles
        count_splits = 0

        for i in range(n_row_tiles):
            low1 = i * max_size
            high1 = min((i + 1) * max_size, matrix_size[0])
            for j in range(n_col_tiles):
                low2 = j * max_size
                high2 = min((j + 1) * max_size, matrix_size[1])
                molecular_similarities[low1:high1, low2:high2] = 1 - spatial.distance.cdist(
                    fingerprints_arr1[low1:high1],
                    fingerprints_arr2[low2:high2],
                    method
                )
                # Track progress:
                count_splits += 1
                if print_progress:
                    print('\r',
                          "Calculated submatrix {} out of {}".format(count_splits, splits),
                          end="")

        if print_progress:
            print(20 * '--')
            # Fixed typo in the user-facing message ("Succesfully").
            print("Successfully calculated matrix with all-vs-all molecular similarity values.")
        if filename is not None:
            np.save(filename, molecular_similarities)
            print("Matrix was saved under:", filename)

    return molecular_similarities
# --------------------------------------------------------------------------------------------------
# ---------------------------- classical spectra similarity measures -------------------------------
# --------------------------------------------------------------------------------------------------
def cosine_score_greedy(spec1,
                        spec2,
                        mass_shift,
                        tol,
                        min_intens=0,
                        use_numba=True):
    """Greedy (modified) cosine score between two spectra.

    Peaks are matched directly, and additionally with an m/z offset of
    mass_shift when one is given (the "modified cosine" variant). Matches
    are then accepted greedily in order of decreasing peak-product weight,
    which is fast but not guaranteed optimal for the underlying bipartite
    matching problem (see cosine_score_hungarian for the exact solution).

    Args:
    ----
    spec1: Spectrum peaks and intensities as numpy array.
    spec2: Spectrum peaks and intensities as numpy array.
    mass_shift: float or None
        Parent-mass difference to also try as a peak offset; None or 0.0
        gives the plain cosine score.
    tol: float
        Tolerance value to define how far two peaks can be apart to still count as match.
    min_intens: float
        Minimum relative intensity; weaker peaks are discarded before matching
        (faster but less precise). NOTE: inputs are normalized in place.
    use_numba: bool
        Select the numba-compiled pair finder instead of the pure-Python one.

    Returns:
    -------
    Tuple of (score, list of accepted matches).
    """
    if spec1.shape[0] == 0 or spec2.shape[0] == 0:
        return 0.0, []

    # Normalize each spectrum to its base peak (mutates the caller's arrays).
    spec1[:, 1] = spec1[:, 1]/max(spec1[:, 1])
    spec2[:, 1] = spec2[:, 1]/max(spec2[:, 1])

    # Drop low-intensity peaks if requested.
    spec1 = spec1[spec1[:, 1] > min_intens, :]
    spec2 = spec2[spec2[:, 1] > min_intens, :]

    pair_finder = find_pairs_numba if use_numba else find_pairs
    matching_pairs = pair_finder(spec1, spec2, tol, shift=0.0)
    if mass_shift is not None \
            and mass_shift != 0.0:
        matching_pairs = matching_pairs + pair_finder(spec1, spec2, tol, shift=mass_shift)

    # Best-weighted candidate pairs first; then accept greedily, using each
    # peak at most once per spectrum.
    matching_pairs = sorted(matching_pairs, key=lambda x: x[2], reverse=True)

    used1 = set()
    used2 = set()
    score = 0.0
    used_matches = []
    for match in matching_pairs:
        if match[0] not in used1 and match[1] not in used2:
            score += match[2]
            used1.add(match[0])
            used2.add(match[1])
            used_matches.append(match)

    # Normalize by the larger of the two self-products.
    score = score/max(np.sum(spec1[:, 1]**2), np.sum(spec2[:, 1]**2))
    return score, used_matches
def cosine_score_hungarian(spec1,
                           spec2,
                           mass_shift,
                           tol,
                           min_intens=0):
    """Exact (modified) cosine score via the Hungarian algorithm.

    Solves the weighted bipartite matching between candidate peak pairs
    optimally; slower than the greedy variant but fully accurate.

    Args:
    --------
    spec1: Spectrum peaks and intensities as numpy array.
    spec2: Spectrum peaks and intensities as numpy array.
    mass_shift: float
        Difference in parent mass of both spectra to account for. Set to 'None'
        when no shifting is desired --> back to normal cosine score.
    tol: float
        Tolerance value to define how far two peaks can be apart to still count as match.
    min_intens: float
        Minimum relative intensity; weaker peaks are discarded before matching
        (faster but less precise). NOTE: inputs are normalized in place.

    Returns:
    -------
    Tuple of (score, list of (index1, index2) matches).
    """
    if spec1.shape[0] == 0 or spec2.shape[0] == 0:
        return 0.0, []

    # Normalize each spectrum to its base peak (mutates the caller's arrays).
    spec1[:, 1] = spec1[:, 1]/max(spec1[:, 1])
    spec2[:, 1] = spec2[:, 1]/max(spec2[:, 1])

    # Drop low-intensity peaks if requested.
    spec1 = spec1[spec1[:, 1] > min_intens, :]
    spec2 = spec2[spec2[:, 1] > min_intens, :]

    matching_pairs = find_pairs_numba(spec1, spec2, tol, shift=0.0)
    if mass_shift is not None \
            and mass_shift != 0.0:
        matching_pairs = matching_pairs + find_pairs_numba(spec1, spec2, tol, shift=mass_shift)
    matching_pairs = sorted(matching_pairs, key=lambda x: x[2], reverse=True)

    used_matches = []
    # Compact index lists for the peaks that occur in any candidate pair.
    idx1 = list(set([pair[0] for pair in matching_pairs]))
    idx2 = list(set([pair[1] for pair in matching_pairs]))
    cost = np.ones((len(idx1), len(idx2)))
    if len(matching_pairs) > 0:
        # Cost = 1 - weight, so minimizing cost maximizes total weight.
        for pair in matching_pairs:
            cost[idx1.index(pair[0]), idx2.index(pair[1])] = 1 - pair[2]
        # Use hungarian algorithm to solve the linear sum assignment problem
        row_ind, col_ind = linear_sum_assignment(cost)
        score = len(row_ind) - cost[row_ind, col_ind].sum()
        used_matches = [(idx1[r], idx2[c]) for r, c in zip(row_ind, col_ind)]
        # Normalize by the larger of the two self-products.
        score = score/max(np.sum(spec1[:, 1]**2), np.sum(spec2[:, 1]**2))
    else:
        score = 0.0
    return score, used_matches
def cosine_matrix_fast(spectra,
                       tol,
                       max_mz,
                       min_mz=0):
    """All-vs-all cosine similarity matrix from one-hot binned spectra.

    Be careful! Binning is here done by creating one-hot vectors.
    It is hence really actual "binning" and different from the
    tolerance-based approach used for the cosine_matrix or molnet_matrix!
    Also: tol here is about tol/2 when compared to cosine_matrix or
    molnet_matrix...
    """
    for idx, spectrum in enumerate(spectra):
        peaks = np.array(spectrum.peaks.copy(), dtype=float)
        # Normalize intensities to the base peak:
        peaks[:, 1] = peaks[:, 1]/np.max(peaks[:, 1])
        binned = one_hot_spectrum(peaks, tol, max_mz, shift=0,
                                  min_mz=min_mz, method='max')
        if idx == 0:
            # Allocate the vector stack once the bin count is known.
            spec_vectors = np.zeros((len(spectra), binned.shape[0]))
        spec_vectors[idx, :] = binned

    distances = spatial.distance.cdist(spec_vectors, spec_vectors, 'cosine')
    return 1 - distances
def cosine_score_matrix(spectra,
                        tol,
                        max_mz=1000.0,
                        # min_mz=0,
                        min_intens=0,
                        mass_shifting=False,
                        method='hungarian',
                        num_workers=4,
                        filename=None,
                        safety_points=None):
    """Create Matrix of all modified cosine similarities.
    Takes some time to calculate, so better only do it once and save as npy.
    Now implemented: parallelization of code using concurrent.futures and numba options.
    spectra: list
        List of spectra (of Spectrum class)
    tol: float
        Tolerance to still count peaks a match (mz +- tolerance).
    max_mz: float
        Maxium m-z mass to take into account
    #min_mz: float
    #    Minimum m-z mass to take into account
    min_intens: float
        Sets the minimum relative intensity peaks must have to be looked at for
        potential matches.
    mass_shifting: bool
        Set to 'True' if mass difference between spectra should be accounted for
        --> "modified cosine" score
        Set to 'False' for --> "normal cosine" score
    method: 'greedy', 'greedy-numba', 'hungarian'
        "greedy" will use Simon's molnet scoring which is faster than hungarian,
        but not 100% accurate
        regarding the weighted bipartite matching problem.
        "hungarian" will use the Hungarian algorithm, which is more accurate.
        Since its slower, numba is used here to compile in time.
        "greedy-numba" will use a (partly) numba compiled version of greedy.
        Much faster, but needs numba.
    num_workers: int
        Number of threads to use for calculation.
    filename: str/ None
        Filename to look for existing npy-file with molent matrix. Or, if not
        found, to use to save the newly calculated matrix.
    safety_points: int
        Number of safety points, i.e. number of times the modcos-matrix is saved
        during process. Set to 'None' to avoid saving matrix on the way.

    Returns two square numpy arrays: the pairwise similarity scores and the
    corresponding number of matched peaks per pair.
    """
    if filename is not None:
        # Normalize the filename so both "x" and "x.npy" are accepted.
        if filename[-4:] != '.npy':
            filename = filename + '.npy'
        # Try loading saved data
        try:
            print("Loading similarity scores from", filename)
            modcos_sim = np.load(filename)
            print("Loading min_match values from", filename[:-4]+ "_matches.npy")
            modcos_matches = np.load(filename[:-4] + "_matches.npy")
            # Check if matrix was calculated to the end:
            # a zero on the diagonal marks a row that was never computed
            # (a spectrum's self-similarity should not be 0).
            diagonal = modcos_sim.diagonal()
            if np.min(diagonal) == 0:
                print("Uncomplete cosine similarity scores found and loaded.")
                missing_scores = np.where(diagonal == 0)[0].astype(int)
                print("Missing cosine scores will be calculated.")
                # Rough upper-triangle pair count (n*(n+1)/2 would be exact);
                # counter_init estimates how many pairs were already done,
                # since row i contributes len(spectra) - i pairs.
                counter_total = int((len(spectra)**2)/2)
                counter_init = counter_total - np.sum(len(spectra) - missing_scores)
                print("About ", 100*(counter_init/counter_total),
                      "% of the values already completed.")
                collect_new_data = True
            else:
                print("Complete cosine similarity scores found and loaded.")
                missing_scores = []
                counter_init = 0
                collect_new_data = False
        except FileNotFoundError:
            # No cached results: compute everything from scratch.
            print("Could not find file ", filename, "or file",
                  filename[:-4] + "_matches.npy")
            if mass_shifting:
                print("Modified cosine scores will be calculated from scratch.")
            else:
                print("Cosine scores will be calculated from scratch.")
            collect_new_data = True
            missing_scores = np.arange(0, len(spectra))
            counter_init = 0
    else:
        # No filename given: never load or checkpoint, compute all rows.
        collect_new_data = True
        missing_scores = np.arange(0, len(spectra))
        counter_init = 0
    if collect_new_data:
        # NOTE(review): if counter_init == 0 the (possibly loaded) matrices
        # are re-initialized to zeros -- confirm this edge case is intended
        # when a loaded-but-incomplete file yields counter_init == 0.
        if counter_init == 0:
            modcos_sim = np.zeros((len(spectra), len(spectra)))
            modcos_matches = np.zeros((len(spectra), len(spectra)))
        counter = counter_init
        if safety_points is not None:
            # Save modcos-matrix along process
            # (checkpoint every 'safety_save' computed pairs).
            safety_save = int(((len(spectra)**2)/2)/safety_points)
        print("Calculate pairwise scores by", num_workers, "number of workers.")
        # Only the upper triangle (j >= i) is computed here; the lower
        # triangle is mirrored afterwards.
        for i in missing_scores: #range(n_start, len(spectra)):
            spec1 = np.array(spectra[i].peaks, dtype=float)
            # Drop peaks above the m/z cutoff.
            spec1 = spec1[spec1[:, 0] < max_mz, :]
            parameter_collection = []
            for j in range(i, len(spectra)):
                spec2 = np.array(spectra[j].peaks, dtype=float)
                spec2 = spec2[spec2[:, 0] < max_mz, :]
                if mass_shifting:
                    # Parent-mass difference used by the "modified cosine".
                    mass_shift = spectra[i].parent_mz - spectra[j].parent_mz
                else:
                    mass_shift = None
                parameter_collection.append([spec1, spec2, i, j,
                                             mass_shift, tol, min_intens,
                                             method, counter])
                counter += 1
            # Create a pool of processes. For instance one for each CPU in your machine.
            modcos_pairs = []
            with ThreadPoolExecutor(max_workers=num_workers) as executor:
                futures = [executor.submit(modcos_pair, X, len(spectra)) for X in parameter_collection]
                modcos_pairs.append(futures)
            # Collect the results in submission order; the 'with' block above
            # already waited for all futures to finish.
            for m, future in enumerate(modcos_pairs[0]):
                _, _, ind_i, ind_j, _, _, _, _, counting = parameter_collection[m]
                modcos_sim[ind_i, ind_j] = future.result()[0]
                modcos_matches[ind_i, ind_j] = future.result()[1]
                # Periodic checkpoint of the partial matrices.
                if filename is not None \
                    and safety_points is not None:
                    if (counting+1) % safety_save == 0:
                        np.save(filename, modcos_sim)
                        np.save(filename[:-4] + "_matches.npy", modcos_matches)
    # Symmetric matrix --> fill
    for i in range(1, len(spectra)):
        for j in range(i):
            modcos_sim[i, j] = modcos_sim[j, i]
            modcos_matches[i, j] = modcos_matches[j, i]
    # Save final results
    if filename is not None:
        np.save(filename, modcos_sim)
        np.save(filename[:-4]+ "_matches.npy", modcos_matches)
    return modcos_sim, modcos_matches
def modcos_pair(X, len_spectra):
    """Calculate the (modified) cosine score for a single spectrum pair.

    Args:
    ----
    X: list
        Packed parameters as assembled by cosine_score_matrix():
        [spec1, spec2, i, j, mass_shift, tol, min_intens, method, counter].
    len_spectra: int
        Total number of spectra (only used for progress reporting).

    Returns a tuple of (similarity score, number of used peak matches).

    Raises:
    ------
    ValueError
        If method is not one of 'greedy', 'greedy-numba', 'hungarian'.
    """
    spectra_i, spectra_j, i, j, mass_shift, tol, min_intens, method, counter = X
    if method == 'greedy':
        molnet_pair, used_matches = cosine_score_greedy(spectra_i, spectra_j,
                                                        mass_shift, tol,
                                                        min_intens=min_intens,
                                                        use_numba=False)
    elif method == 'greedy-numba':
        molnet_pair, used_matches = cosine_score_greedy(spectra_i, spectra_j,
                                                        mass_shift, tol,
                                                        min_intens=min_intens,
                                                        use_numba=True)
    elif method == 'hungarian':
        molnet_pair, used_matches = cosine_score_hungarian(spectra_i, spectra_j,
                                                           mass_shift, tol,
                                                           min_intens=min_intens)
    else:
        # Bug fix: the original only printed a warning here and then crashed
        # with an UnboundLocalError on 'molnet_pair'. Fail fast instead.
        raise ValueError("Unknown method '{}'. Use 'greedy', 'greedy-numba' "
                         "or 'hungarian'.".format(method))
    # Occasional progress output (every 1000th pair, and on the last row).
    if (counter+1) % 1000 == 0 or counter == len_spectra-1:
        print('\r',
              ' Calculated MolNet for pair {} -- {}'.format(i, j),
              '. ( ', np.round(200*(counter+1)/len_spectra**2, 2), ' % done).',
              end="")
    return molnet_pair, len(used_matches)
def one_hot_spectrum(spec,
                     tol,
                     max_mz,
                     shift=0,
                     min_mz=0,
                     method='max'):
    """Convert spectrum peaks into a one-hot-like binned intensity vector.

    Args:
    ----
    spec: numpy array
        Spectrum peaks as array of shape (n_peaks, 2) with columns
        (mz, intensity).
    tol: float
        Bin width in m/z units.
    max_mz: float
        Maximum m/z to take into account (sets the vector length).
    shift: float, optional
        Shift added to all peak positions before binning. Default is 0.
    min_mz: float, optional
        Minimum m/z. Default is 0.
    method: str
        'max' take highest intensity peak within every bin.
        'sum' take sum of all peaks within every bin.

    Raises:
    ------
    ValueError
        If method is neither 'max' nor 'sum'.
    """
    dim_vector = int((max_mz - min_mz)/tol)
    one_hot_spec = np.zeros((dim_vector))
    # NOTE(review): min_mz only shortens the vector; peak positions are not
    # offset by it -- confirm this is intended for min_mz != 0.
    idx = ((spec[:, 0] + shift)*1/tol).astype(int)
    # Out-of-range peaks are clamped into bin 0 (kept from original code).
    idx[idx >= dim_vector] = 0
    idx[idx < 0] = 0
    if method == 'max':
        for id1 in set(idx):
            one_hot_spec[id1] = np.max(spec[(idx == id1), 1])
    elif method == 'sum':
        for id1 in set(idx):
            one_hot_spec[id1] = np.sum(spec[(idx == id1), 1])
    else:
        # Bug fix: the original only printed a warning and silently returned
        # an all-zero vector. Fail fast with a clear error instead.
        raise ValueError("Unknown method '{}'. Use 'max' or 'sum'.".format(method))
    return one_hot_spec
@numba.njit
def find_pairs_numba(spec1, spec2, tol, shift=0):
    """Find matching peak pairs between two spectra (numba nopython-compiled).

    Brute-force variant: every peak of spec1 is compared against all peaks
    of spec2 without any pre-sorting (O(n*m)).

    Args
    ----
    spec1 : numpy array
        Spectrum peaks as array of shape (n_peaks, 2) with columns
        (mz, intensity).
    spec2 : numpy array
        Spectrum peaks as array of shape (n_peaks, 2) with columns
        (mz, intensity).
    tol : float
        Tolerance. Peaks will be considered a match when < tol appart.
    shift : float, optional
        Shift spectra peaks by shift. The default is 0.
    Returns
    -------
    matching_pairs : list
        List of (spec1 index, spec2 index, intensity product) tuples for
        all peak pairs within tolerance.
    """
    matching_pairs = []
    for idx in range(len(spec1)):
        intensity = spec1[idx, 1]
        # Indices of all spec2 peaks within +-tol of the shifted spec1 peak.
        matches = np.where((np.abs(spec2[:, 0] - spec1[idx, 0] + shift) <= tol))[0]
        for match in matches:
            # Each pair is scored by the product of the two peak intensities.
            matching_pairs.append((idx, match, intensity*spec2[match][1]))
    return matching_pairs
def find_pairs(spec1, spec2, tol, shift=0):
    """Find matching peak pairs between two spectra using a sorted sweep.

    Both spectra are first sorted by m/z; a sliding window over spec2 then
    collects, for every spec1 peak, all spec2 peaks within the tolerance.
    Returned indices refer to the *sorted* peak order.

    Args
    ----
    spec1 : numpy array
        Spectrum peaks as array of shape (n_peaks, 2) with columns
        (mz, intensity).
    spec2 : numpy array
        Spectrum peaks as array of shape (n_peaks, 2) with columns
        (mz, intensity).
    tol : float
        Tolerance. Peaks will be considered a match when < tol appart.
    shift : float, optional
        Shift spectra peaks by shift. The default is 0.
    Returns
    -------
    matching_pairs : list
        List of (spec1 index, spec2 index, intensity product) tuples.
    """
    # Sort both spectra by m/z (ties broken by intensity).
    spec1 = spec1[np.lexsort((spec1[:, 1], spec1[:, 0])), :]
    spec2 = spec2[np.lexsort((spec2[:, 1], spec2[:, 0])), :]
    pairs = []
    window_low = 0
    n2 = len(spec2)
    for pos1 in range(len(spec1)):
        mz1 = spec1[pos1, 0]
        intensity1 = spec1[pos1, 1]
        # Advance the lower window edge past spec2 peaks below mz1 - tol.
        while window_low < n2 and spec2[window_low][0] + shift < mz1 - tol:
            window_low += 1
        if window_low == n2:
            # Remaining spec1 peaks are all above every spec2 peak.
            break
        pos2 = window_low
        # Collect every spec2 peak still inside the tolerance window.
        while pos2 < n2 and spec2[pos2][0] + shift < mz1 + tol:
            pairs.append((pos1, pos2, intensity1 * spec2[pos2][1]))
            pos2 += 1
    return pairs
Be careful! Binning is here done by creating one-hot vectors.
It is hence really actual "bining" and different from the tolerance-based
approach used for the cosine_matrix or molnet_matrix!
Also: tol here is about tol/2 when compared to cosine_matrix or molnet_matrix...
Calculate cosine score between spectrum1 and spectrum2.
If mass_shifted = True it will shift the spectra with respect to each other
by difference in their parentmasses.
Args:
----
spec1: Spectrum peaks and intensities as numpy array.
spec2: Spectrum peaks and intensities as numpy array.
tol: float
Tolerance value to define how far two peaks can be apart to still count as match.
min_intens: float
Minimum intensity (relative to max.intensity peak in spectrum). Peaks with lower
intensity will be ignored --> higher min_intens is faster, but less precise.
Taking full care of weighted bipartite matching problem.
Use Hungarian algorithm (slow...)
Args:
--------
spec1: Spectrum peaks and intensities as numpy array.
spec2: Spectrum peaks and intensities as numpy array.
mass_shift: float
Difference in parent mass of both spectra to account for. Set to 'None'
when no shifting is desired --> back to normal cosine score.
tol: float
Tolerance value to define how far two peaks can be apart to still count as match.
min_intens: float
Minimum intensity (relative to max.intensity peak in spectrum). Peaks with lower
intensity will be ignored --> higher min_intens is faster, but less precise.
Create Matrix of all modified cosine similarities.
Takes some time to calculate, so better only do it once and save as npy.
Now implemented: parallelization of code using concurrent.futures and numba options.
spectra: list
List of spectra (of Spectrum class)
tol: float
Tolerance to still count peaks a match (mz +- tolerance).
max_mz: float
Maxium m-z mass to take into account
#min_mz: float
# Minimum m-z mass to take into account
min_intens: float
Sets the minimum relative intensity peaks must have to be looked at for
potential matches.
mass_shifting: bool
Set to 'True' if mass difference between spectra should be accounted for
--> "modified cosine" score
Set to 'False' for --> "normal cosine" score
method: 'greedy', 'greedy-numba', 'hungarian'
"greedy" will use Simon's molnet scoring which is faster than hungarian,
but not 100% accurate
regarding the weighted bipartite matching problem.
"hungarian" will use the Hungarian algorithm, which is more accurate.
Since its slower, numba is used here to compile in time.
"greedy-numba" will use a (partly) numba compiled version of greedy.
Much faster, but needs numba.
num_workers: int
Number of threads to use for calculation.
filename: str/ None
Filename to look for existing npy-file with molent matrix. Or, if not
found, to use to save the newly calculated matrix.
safety_points: int
Number of safety points, i.e. number of times the modcos-matrix is saved
during process. Set to 'None' to avoid saving matrix on the way.
Find matching pairs between two spectra.
Args
----
spec1 : list of tuples
List of (mz, intensity) tuples.
spec2 : list of tuples
List of (mz, intensity) tuples.
tol : float
Tolerance. Peaks will be considered a match when < tol appart.
shift : float, optional
Shift spectra peaks by shift. The default is 0.
Returns
-------
matching_pairs : list
List of found matching peaks.
Find matching pairs between two spectra.
Args
----
spec1 : list of tuples
List of (mz, intensity) tuples.
spec2 : list of tuples
List of (mz, intensity) tuples.
tol : float
Tolerance. Peaks will be considered a match when < tol appart.
shift : float, optional
Shift spectra peaks by shift. The default is 0.
Returns
-------
matching_pairs : list
List of found matching peaks.
Single molnet pair calculation
Create Matrix of all molecular similarities (based on molecular fingerprints).
If filename is not None, the result will be saved as npy.
To create molecular fingerprints see mol_fingerprints() function from MS_functions.
Args:
----
fingerprints1: list
List of molecular fingerprints (numpy arrays).
fingerprints2: list
List of molecular fingerprints (numpy arrays).
method: str
Method to compare molecular fingerprints. Can be 'cosine', 'dice' etc.
(see scipy.spatial.distance.cdist).
filename: str
Filename to save results to. OR: If file already exists it will be
loaded instead.
max_size: int
Maximum size of (sub) all-vs-all matrix to handle in one go. Will split
up larger matrices into
max_size x max_size matrices.
print_progress: bool, optional
If True, print phase of the run to indicate progress. Default = True.
Convert spectrum peaks into on-hot-vector
method: str
'max' take highest intensity peak within every bin.
'sum' take sum of all peaks within every bin.
Spec2Vec Copyright 2019 Netherlands eScience Center Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Add multi core parallelization, as_completed TODO better use joblib ? or dask? Create array of all finterprints Calculate all-vs-all similarity matrix (similarity here= 1-distance ) Split large matrices up into smaller ones to track progress Track progress: -------------------------------------------------------------------------------------------------- ---------------------------- classical spectra similarity measures ------------------------------- -------------------------------------------------------------------------------------------------- normalize intensities: filter, if wanted: Normalize score: Normalize intensities: Filter, if wanted: Use Hungarian_algorithm: Use hungarian agorithm to solve the linear sum assignment problem Normalize score: Normalize intensities: min_mz=0, Try loading saved data Check if matrix was calculated to the end: Save modcos-matrix along processrange(n_start, len(spectra)): Create a pool of processes. For instance one for each CPU in your machine. Symmetric matrix --> fill Save final results Sort peaks and losses by m/z Do we need to increase the lower idx? | 6,653 | en | 0.801504 |
from leapp.models import Model, fields
from leapp.topics import BootPrepTopic, SystemInfoTopic
from leapp.utils.deprecation import deprecated
class DracutModule(Model):
"""
Specify a dracut module that should be included into the initramfs
The specified dracut module has to be compatible with the target system.
See the description of UpgradeInitramfsTasks and TargetInitramfsTasks
for more information about the role of initramfs in the in-place upgrade
process.
"""
topic = BootPrepTopic
name = fields.String()
"""
Name of the dracut module that should be added (--add option of dracut)
when a initramfs is built.
"""
module_path = fields.Nullable(fields.String(default=None))
"""
module_path specifies dracut modules that are supposed to be copied
If the path is not set, the given name will just be activated. IOW,
if the dracut module is stored outside the /usr/lib/dracut/modules.d/
directory, set the absolute path to it, so leapp will manage it during
the upgrade to ensure the module will be added into the initramfs.
The module has to be stored on the local storage. In such a case, it is
recommended to store it into the 'files' directory of an actor generating
this object.
Note: It's expected to set the full path from the host POV. In case
of actions inside containers, the module is still copied from the HOST
into the container workspace.
"""
class UpgradeInitramfsTasks(Model):
"""
Influence generating of the (leapp) upgrade initramfs
The upgrade initramfs is used during the crucial part of the upgrade,
in which the original rpms are upgraded, configuration of applications
are migrated, etc. To be able to boot into the leapp upgrade environment
correctly, it is expected all needed drivers, configuration files, ... are
included inside the upgrade initramfs. Produce this message with
expected content to influence the upgrade initramfs.
If some specific rpms or content is required to be able to build the
upgrade initramfs, see the <container-model>.
Note: The built initramfs is composed of stuff for the target system.
In example, if you are on RHEL 7 and plan the upgrade to RHEL 8, you need
to provide content (e.g. drivers, dracut modules) compatible with
RHEL 8 system.
"""
topic = BootPrepTopic
include_files = fields.List(fields.String(), default=[])
"""
List of files (cannonical filesystem paths) to include in the initramfs
"""
include_dracut_modules = fields.List(fields.Model(DracutModule), default=[])
"""
List of dracut modules that should be installed in the initramfs.
See the DracutModule model for more information.
"""
class TargetInitramfsTasks(UpgradeInitramfsTasks):
"""
Analogy to UpgradeInitramfsTasks, but referring to the target initram disk.
Target initramfs is the one, that will be used to boot to your upgraded
system. If you want to ensure that you are able to boot into the target
(upgraded) system, it is possible you need to add same stuff as you added
into the upgrade initramfs.
If some specific rpms are required to be able to build the upgrade
initramfs, install these via the RpmTransactionTasks model.
"""
@deprecated(since='2021-04-01', message='Replaced by TargetInitramfsTasks.')
class InitrdIncludes(Model):
"""
List of files (cannonical filesystem paths) to include in RHEL-8 initramfs
"""
topic = SystemInfoTopic
files = fields.List(fields.String())
@deprecated(since='2021-04-01', message='Replaced by UpgradeInitramfsTasks.')
class UpgradeDracutModule(Model):
"""
Specify a dracut module that should be included into the (leapp) upgrade initramfs.
The upgrade initramfs is used during the crucial part of the upgrade,
in which the original rpms are upgraded. If a dracut module is required to
be included inside the upgrade initramfs (e.g. because it is needed
to handle/initialize your storage properly), produce this msg.
"""
topic = BootPrepTopic
name = fields.String()
"""
Name of the dracut module that should be added (--add option of dracut)
"""
module_path = fields.Nullable(fields.String(default=None))
"""
module_path specifies dracut modules that are to be copied
If the path is not set, the given name will just be activated.
"""
| repos/system_upgrade/common/models/initramfs.py | 4,478 | Specify a dracut module that should be included into the initramfs
The specified dracut module has to be compatible with the target system.
See the description of UpgradeInitramfsTasks and TargetInitramfsTasks
for more information about the role of initramfs in the in-place upgrade
process.
List of files (cannonical filesystem paths) to include in RHEL-8 initramfs
Analogy to UpgradeInitramfsTasks, but referring to the target initram disk.
Target initramfs is the one, that will be used to boot to your upgraded
system. If you want to ensure that you are able to boot into the target
(upgraded) system, it is possible you need to add same stuff as you added
into the upgrade initramfs.
If some specific rpms are required to be able to build the upgrade
initramfs, install these via the RpmTransactionTasks model.
Specify a dracut module that should be included into the (leapp) upgrade initramfs.
The upgrade initramfs is used during the crucial part of the upgrade,
in which the original rpms are upgraded. If a dracut module is required to
be included inside the upgrade initramfs (e.g. because it is needed
to handle/initialize your storage properly), produce this msg.
Influence generating of the (leapp) upgrade initramfs
The upgrade initramfs is used during the crucial part of the upgrade,
in which the original rpms are upgraded, configuration of applications
are migrated, etc. To be able to boot into the leapp upgrade environment
correctly, it is expected all needed drivers, configuration files, ... are
included inside the upgrade initramfs. Produce this message with
expected content to influence the upgrade initramfs.
If some specific rpms or content is required to be able to build the
upgrade initramfs, see the <container-model>.
Note: The built initramfs is composed of stuff for the target system.
In example, if you are on RHEL 7 and plan the upgrade to RHEL 8, you need
to provide content (e.g. drivers, dracut modules) compatible with
RHEL 8 system. | 1,984 | en | 0.912068 |
# -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.colorimetry.luminance` module.
"""
import numpy as np
import unittest
from colour.colorimetry import (
luminance_Newhall1943, intermediate_luminance_function_CIE1976,
luminance_CIE1976, luminance_ASTMD1535, luminance_Fairchild2010,
luminance_Fairchild2011, luminance_Abebe2017)
from colour.colorimetry.luminance import luminance
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'TestLuminanceNewhall1943', 'TestLuminanceASTMD1535',
'TestIntermediateLuminanceFunctionCIE1976', 'TestLuminanceCIE1976',
'TestLuminanceFairchild2010', 'TestLuminanceFairchild2011',
'TestLuminanceAbebe2017', 'TestLuminance'
]
class TestLuminanceNewhall1943(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.luminance.luminance_Newhall1943`
definition unit tests methods.
"""
def test_luminance_Newhall1943(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Newhall1943`
definition.
"""
self.assertAlmostEqual(
luminance_Newhall1943(4.08244375), 12.550078816731881, places=7)
self.assertAlmostEqual(
luminance_Newhall1943(5.39132685), 23.481252371310738, places=7)
self.assertAlmostEqual(
luminance_Newhall1943(2.97619312), 6.4514266875601924, places=7)
def test_n_dimensional_luminance_Newhall1943(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Newhall1943`
definition n-dimensional arrays support.
"""
V = 4.08244375
Y = luminance_Newhall1943(V)
V = np.tile(V, 6)
Y = np.tile(Y, 6)
np.testing.assert_almost_equal(luminance_Newhall1943(V), Y, decimal=7)
V = np.reshape(V, (2, 3))
Y = np.reshape(Y, (2, 3))
np.testing.assert_almost_equal(luminance_Newhall1943(V), Y, decimal=7)
V = np.reshape(V, (2, 3, 1))
Y = np.reshape(Y, (2, 3, 1))
np.testing.assert_almost_equal(luminance_Newhall1943(V), Y, decimal=7)
def test_domain_range_scale_luminance_Newhall1943(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Newhall1943`
definition domain and range scale support.
"""
Y = luminance_Newhall1943(4.08244375)
d_r = (('reference', 1, 1), (1, 0.1, 0.01), (100, 10, 1))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
luminance_Newhall1943(4.08244375 * factor_a),
Y * factor_b,
decimal=7)
@ignore_numpy_errors
def test_nan_luminance_Newhall1943(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Newhall1943`
definition nan support.
"""
luminance_Newhall1943(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestLuminanceASTMD1535(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.luminance.luminance_ASTMD1535`
definition unit tests methods.
"""
def test_luminance_ASTMD1535(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_ASTMD1535`
definition.
"""
self.assertAlmostEqual(
luminance_ASTMD1535(4.08244375), 12.236342675366036, places=7)
self.assertAlmostEqual(
luminance_ASTMD1535(5.39132685), 22.893999867280378, places=7)
self.assertAlmostEqual(
luminance_ASTMD1535(2.97619312), 6.2902253509053132, places=7)
def test_n_dimensional_luminance_ASTMD1535(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_ASTMD1535`
definition n-dimensional arrays support.
"""
V = 4.08244375
Y = luminance_ASTMD1535(V)
V = np.tile(V, 6)
Y = np.tile(Y, 6)
np.testing.assert_almost_equal(luminance_ASTMD1535(V), Y, decimal=7)
V = np.reshape(V, (2, 3))
Y = np.reshape(Y, (2, 3))
np.testing.assert_almost_equal(luminance_ASTMD1535(V), Y, decimal=7)
V = np.reshape(V, (2, 3, 1))
Y = np.reshape(Y, (2, 3, 1))
np.testing.assert_almost_equal(luminance_ASTMD1535(V), Y, decimal=7)
def test_domain_range_scale_luminance_ASTMD1535(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_ASTMD1535`
definition domain and range scale support.
"""
Y = luminance_ASTMD1535(4.08244375)
d_r = (('reference', 1, 1), (1, 0.1, 0.01), (100, 10, 1))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
luminance_ASTMD1535(4.08244375 * factor_a),
Y * factor_b,
decimal=7)
@ignore_numpy_errors
def test_nan_luminance_ASTMD1535(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_ASTMD1535`
definition nan support.
"""
luminance_ASTMD1535(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestIntermediateLuminanceFunctionCIE1976(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.luminance.\
intermediate_luminance_function_CIE1976` definition unit tests methods.
"""
def test_intermediate_luminance_function_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.\
intermediate_luminance_function_CIE1976` definition.
"""
self.assertAlmostEqual(
intermediate_luminance_function_CIE1976(0.495929964178047),
12.197225350000002,
places=7)
self.assertAlmostEqual(
intermediate_luminance_function_CIE1976(0.613072093530391),
23.042767810000004,
places=7)
self.assertAlmostEqual(
intermediate_luminance_function_CIE1976(0.394876333449113),
6.157200790000001,
places=7)
def test_n_dimensional_intermediate_luminance_function_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.\
intermediate_luminance_function_CIE1976` definition n-dimensional arrays
support.
"""
f_Y_Y_n = 0.495929964178047
Y = intermediate_luminance_function_CIE1976(f_Y_Y_n)
f_Y_Y_n = np.tile(f_Y_Y_n, 6)
Y = np.tile(Y, 6)
np.testing.assert_almost_equal(
intermediate_luminance_function_CIE1976(f_Y_Y_n), Y, decimal=7)
f_Y_Y_n = np.reshape(f_Y_Y_n, (2, 3))
Y = np.reshape(Y, (2, 3))
np.testing.assert_almost_equal(
intermediate_luminance_function_CIE1976(f_Y_Y_n), Y, decimal=7)
f_Y_Y_n = np.reshape(f_Y_Y_n, (2, 3, 1))
Y = np.reshape(Y, (2, 3, 1))
np.testing.assert_almost_equal(
intermediate_luminance_function_CIE1976(f_Y_Y_n), Y, decimal=7)
def test_domain_range_scale_intermediate_luminance_function_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.\
intermediate_luminance_function_CIE1976` definition domain and range scale
support.
"""
Y = intermediate_luminance_function_CIE1976(41.527875844653451, 100)
for scale in ('reference', 1, 100):
with domain_range_scale(scale):
np.testing.assert_almost_equal(
intermediate_luminance_function_CIE1976(
41.527875844653451, 100),
Y,
decimal=7)
@ignore_numpy_errors
def test_nan_intermediate_luminance_function_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.\
intermediate_luminance_function_CIE1976` definition nan support.
"""
intermediate_luminance_function_CIE1976(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestLuminanceCIE1976(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.luminance.luminance_CIE1976` definition
unit tests methods.
"""
def test_luminance_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_CIE1976`
definition.
"""
self.assertAlmostEqual(
luminance_CIE1976(41.527875844653451),
12.197225350000002,
places=7)
self.assertAlmostEqual(
luminance_CIE1976(55.116362849525402),
23.042767810000004,
places=7)
self.assertAlmostEqual(
luminance_CIE1976(29.805654680097106), 6.157200790000001, places=7)
self.assertAlmostEqual(
luminance_CIE1976(56.480581732417676, 50),
12.197225349999998,
places=7)
self.assertAlmostEqual(
luminance_CIE1976(47.317620274162735, 75),
12.197225350000002,
places=7)
self.assertAlmostEqual(
luminance_CIE1976(42.519930728120940, 95),
12.197225350000005,
places=7)
def test_n_dimensional_luminance_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_CIE1976`
definition n-dimensional arrays support.
"""
L_star = 41.527875844653451
Y = luminance_CIE1976(L_star)
L_star = np.tile(L_star, 6)
Y = np.tile(Y, 6)
np.testing.assert_almost_equal(luminance_CIE1976(L_star), Y, decimal=7)
L_star = np.reshape(L_star, (2, 3))
Y = np.reshape(Y, (2, 3))
np.testing.assert_almost_equal(luminance_CIE1976(L_star), Y, decimal=7)
L_star = np.reshape(L_star, (2, 3, 1))
Y = np.reshape(Y, (2, 3, 1))
np.testing.assert_almost_equal(luminance_CIE1976(L_star), Y, decimal=7)
def test_domain_range_scale_luminance_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_CIE1976`
definition domain and range scale support.
"""
Y = luminance_CIE1976(41.527875844653451, 100)
d_r = (('reference', 1), (1, 0.01), (100, 1))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
luminance_CIE1976(41.527875844653451 * factor, 100),
Y * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_luminance_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_CIE1976`
definition nan support.
"""
luminance_CIE1976(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestLuminanceFairchild2010(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.luminance.luminance_Fairchild2010`
definition unit tests methods.
"""
def test_luminance_Fairchild2010(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2010`
definition.
"""
self.assertAlmostEqual(
luminance_Fairchild2010(31.996390226262736),
0.12197225350000002,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2010(60.203153682783302),
0.23042767809999998,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2010(11.836517240976489),
0.06157200790000001,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2010(24.424283249379986, 2.75),
0.12197225350000002,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2010(100.019986327374240),
1008.00000024,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2010(100.019999997090270),
100799.92312466,
places=7)
def test_n_dimensional_luminance_Fairchild2010(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2010`
definition n-dimensional arrays support.
"""
L_hdr = 31.996390226262736
Y = luminance_Fairchild2010(L_hdr)
L_hdr = np.tile(L_hdr, 6)
Y = np.tile(Y, 6)
np.testing.assert_almost_equal(
luminance_Fairchild2010(L_hdr), Y, decimal=7)
L_hdr = np.reshape(L_hdr, (2, 3))
Y = np.reshape(Y, (2, 3))
np.testing.assert_almost_equal(
luminance_Fairchild2010(L_hdr), Y, decimal=7)
L_hdr = np.reshape(L_hdr, (2, 3, 1))
Y = np.reshape(Y, (2, 3, 1))
np.testing.assert_almost_equal(
luminance_Fairchild2010(L_hdr), Y, decimal=7)
def test_domain_range_scale_luminance_Fairchild2010(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2010`
definition domain and range scale support.
"""
Y = luminance_Fairchild2010(31.996390226262736)
d_r = (('reference', 1, 1), (1, 0.01, 1), (100, 1, 100))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
luminance_Fairchild2010(31.996390226262736 * factor_a),
Y * factor_b,
decimal=7)
@ignore_numpy_errors
def test_nan_luminance_Fairchild2010(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2010`
definition nan support.
"""
luminance_Fairchild2010(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestLuminanceFairchild2011(unittest.TestCase):
    """
    Defines :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
    definition unit tests methods.
    """

    def test_luminance_Fairchild2011(self):
        """
        Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
        definition.
        """

        # (positional arguments, expected luminance) pairs.
        cases = (
            ((51.852958445912506, ), 0.12197225350000007),
            ((65.275207956353853, ), 0.23042767809999998),
            ((39.818935510715917, ), 0.061572007900000038),
            ((0.13268968410139345, 2.75), 0.12197225350000002),
            ((234.72925681957565, ), 1008.00000000),
            ((245.57059778237573, ), 100800.00000000),
        )
        for args, expected in cases:
            self.assertAlmostEqual(
                luminance_Fairchild2011(*args), expected, places=7)

    def test_n_dimensional_luminance_Fairchild2011(self):
        """
        Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
        definition n-dimensional arrays support.
        """

        L_hdr = 51.852958445912506
        Y = luminance_Fairchild2011(L_hdr)

        # Tile to a vector, then walk through progressively deeper shapes.
        L_hdr = np.tile(L_hdr, 6)
        Y = np.tile(Y, 6)
        np.testing.assert_almost_equal(
            luminance_Fairchild2011(L_hdr), Y, decimal=7)

        for shape in ((2, 3), (2, 3, 1)):
            L_hdr = np.reshape(L_hdr, shape)
            Y = np.reshape(Y, shape)
            np.testing.assert_almost_equal(
                luminance_Fairchild2011(L_hdr), Y, decimal=7)

    def test_domain_range_scale_luminance_Fairchild2011(self):
        """
        Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
        definition domain and range scale support.
        """

        Y = luminance_Fairchild2011(26.459509817572265)

        for scale, factor_a, factor_b in (
                ('reference', 1, 1), (1, 0.01, 1), (100, 1, 100)):
            with domain_range_scale(scale):
                np.testing.assert_almost_equal(
                    luminance_Fairchild2011(26.459509817572265 * factor_a),
                    Y * factor_b,
                    decimal=7)

    @ignore_numpy_errors
    def test_nan_luminance_Fairchild2011(self):
        """
        Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
        definition nan support.
        """

        # Only checks that degenerate values do not raise.
        cases = np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])
        luminance_Fairchild2011(cases)
class TestLuminanceAbebe2017(unittest.TestCase):
    """
    Defines :func:`colour.colorimetry.luminance.luminance_Abebe2017`
    definition unit tests methods.
    """

    def test_luminance_Abebe2017(self):
        """
        Tests :func:`colour.colorimetry.luminance.luminance_Abebe2017`
        definition.
        """

        # (positional arguments, keyword arguments, expected luminance).
        cases = (
            ((0.486955571109229, ), {}, 12.197225350000004),
            ((0.474544792145434, ), {'method': 'Stevens'},
             12.197225350000025),
            ((0.286847428534793, 1000), {}, 12.197225350000046),
            ((0.192145492588158, 4000), {}, 12.197225350000121),
            ((0.170365211220992, 4000), {'method': 'Stevens'},
             12.197225349999933),
        )
        for args, kwargs, expected in cases:
            self.assertAlmostEqual(
                luminance_Abebe2017(*args, **kwargs), expected, places=7)

    def test_n_dimensional_luminance_Abebe2017(self):
        """
        Tests :func:`colour.colorimetry.luminance.luminance_Abebe2017`
        definition n-dimensional arrays support.
        """

        L = 0.486955571109229
        Y = luminance_Abebe2017(L)

        # Tile to a vector, then walk through progressively deeper shapes.
        L = np.tile(L, 6)
        Y = np.tile(Y, 6)
        np.testing.assert_almost_equal(luminance_Abebe2017(L), Y, decimal=7)

        for shape in ((2, 3), (2, 3, 1)):
            L = np.reshape(L, shape)
            Y = np.reshape(Y, shape)
            np.testing.assert_almost_equal(
                luminance_Abebe2017(L), Y, decimal=7)

    def test_domain_range_scale_luminance_Abebe2017(self):
        """
        Tests :func:`colour.colorimetry.luminance.luminance_Abebe2017`
        definition domain and range scale support.
        """

        L = luminance_Abebe2017(0.486955571109229)

        for scale, factor in (('reference', 1), (1, 1), (100, 1)):
            with domain_range_scale(scale):
                np.testing.assert_almost_equal(
                    luminance_Abebe2017(0.486955571109229 * factor,
                                        100 * factor),
                    L * factor,
                    decimal=7)

    @ignore_numpy_errors
    def test_nan_luminance_Abebe2017(self):
        """
        Tests :func:`colour.colorimetry.luminance.luminance_Abebe2017`
        definition nan support.
        """

        # Only checks that degenerate values do not raise; the same array
        # is passed for both positional arguments, as before.
        cases = np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])
        luminance_Abebe2017(cases, cases)
class TestLuminance(unittest.TestCase):
    """
    Defines :func:`colour.colorimetry.luminance.luminance` definition unit
    tests methods.
    """

    def test_domain_range_scale_luminance(self):
        """
        Tests :func:`colour.colorimetry.luminance.luminance` definition
        domain and range scale support.
        """

        methods = ('Newhall 1943', 'ASTM D1535', 'CIE 1976',
                   'Fairchild 2010', 'Fairchild 2011', 'Abebe 2017')
        # Reference values computed once per method at the default scale.
        values = [
            luminance(41.527875844653451, method, Y_n=100)
            for method in methods
        ]

        for method, value in zip(methods, values):
            for scale, factor in (('reference', 1), (1, 0.01), (100, 1)):
                with domain_range_scale(scale):
                    np.testing.assert_almost_equal(
                        luminance(
                            41.527875844653451 * factor, method, Y_n=100),
                        value * factor,
                        decimal=7)
# Allow running this test module directly with `python test_luminance.py`.
if __name__ == '__main__':
    unittest.main()
| colour/colorimetry/tests/test_luminance.py | 20,521 | Defines :func:`colour.colorimetry.luminance.intermediate_luminance_function_CIE1976` definition unit tests methods.
Defines :func:`colour.colorimetry.luminance.luminance` definition unit
tests methods.
Defines :func:`colour.colorimetry.luminance.luminance_ASTMD1535`
definition unit tests methods.
Defines :func:`colour.colorimetry.luminance.luminance_Abebe2017`
definition unit tests methods.
Defines :func:`colour.colorimetry.luminance.luminance_CIE1976` definition
unit tests methods.
Defines :func:`colour.colorimetry.luminance.luminance_Fairchild2010`
definition unit tests methods.
Defines :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
definition unit tests methods.
Defines :func:`colour.colorimetry.luminance.luminance_Newhall1943`
definition unit tests methods.
Tests :func:`colour.colorimetry.luminance.intermediate_luminance_function_CIE1976` definition domain and range scale
support.
Tests :func:`colour.colorimetry.luminance.luminance` definition
domain and range scale support.
Tests :func:`colour.colorimetry.luminance.luminance_ASTMD1535`
definition domain and range scale support.
Tests :func:`colour.colorimetry.luminance.luminance_Abebe2017`
definition domain and range scale support.
Tests :func:`colour.colorimetry.luminance.luminance_CIE1976`
definition domain and range scale support.
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2010`
definition domain and range scale support.
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
definition domain and range scale support.
Tests :func:`colour.colorimetry.luminance.luminance_Newhall1943`
definition domain and range scale support.
Tests :func:`colour.colorimetry.luminance.intermediate_luminance_function_CIE1976` definition.
Tests :func:`colour.colorimetry.luminance.luminance_ASTMD1535`
definition.
Tests :func:`colour.colorimetry.luminance.luminance_Abebe2017`
definition.
Tests :func:`colour.colorimetry.luminance.luminance_CIE1976`
definition.
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2010`
definition.
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
definition.
Tests :func:`colour.colorimetry.luminance.luminance_Newhall1943`
definition.
Tests :func:`colour.colorimetry.luminance.intermediate_luminance_function_CIE1976` definition n-dimensional arrays
support.
Tests :func:`colour.colorimetry.luminance.luminance_ASTMD1535`
definition n-dimensional arrays support.
Tests :func:`colour.colorimetry.luminance.luminance_Abebe2017`
definition n-dimensional arrays support.
Tests :func:`colour.colorimetry.luminance.luminance_CIE1976`
definition n-dimensional arrays support.
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2010`
definition n-dimensional arrays support.
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
definition n-dimensional arrays support.
Tests :func:`colour.colorimetry.luminance.luminance_Newhall1943`
definition n-dimensional arrays support.
Tests :func:`colour.colorimetry.luminance.intermediate_luminance_function_CIE1976` definition nan support.
Tests :func:`colour.colorimetry.luminance.luminance_ASTMD1535`
definition nan support.
Tests :func:`colour.colorimetry.luminance.luminance_Abebe2017`
definition nan support.
Tests :func:`colour.colorimetry.luminance.luminance_CIE1976`
definition nan support.
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2010`
definition nan support.
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
definition nan support.
Tests :func:`colour.colorimetry.luminance.luminance_Newhall1943`
definition nan support.
Defines the unit tests for the :mod:`colour.colorimetry.luminance` module.
-*- coding: utf-8 -*- | 3,730 | en | 0.576442 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2021. All Rights Reserved.
""" setup.py for resilient-circuits Python module """
import io
from os import path
from setuptools import find_packages, setup
# Resolve paths relative to this file so the build works from any CWD.
this_directory = path.abspath(path.dirname(__file__))

# The README doubles as the long description rendered on PyPI.
with io.open(path.join(this_directory, "README.md"), encoding="utf-8") as readme_file:
    long_description = readme_file.read()

# Runtime dependencies; environment markers split Python 2 / 3 support.
install_requires = [
    "stompest>=2.3.0",
    "circuits",
    "pytz",
    "jinja2~=2.0",
    "pysocks",
    "filelock>=2.0.5",
    "watchdog>=0.9.0, <1.0.0; python_version < '3.6.0'",
    "watchdog>=0.9.0; python_version >= '3.6.0'",
    "resilient>=42.0.0",
    "resilient-lib>=42.0.0"
]

# Command line entry points installed alongside the package.
entry_points = {
    "console_scripts": [
        "res-action-test = resilient_circuits.bin.res_action_test:main",
        "resilient-circuits = resilient_circuits.bin.resilient_circuits_cmd:main"
    ]
}

setup(
    name="resilient_circuits",
    use_scm_version={"root": "../", "relative_to": __file__},
    setup_requires=[
        "setuptools_scm < 6.0.0;python_version<'3.0'",
        "setuptools_scm >= 6.0.0;python_version>='3.0'"
    ],
    license="MIT",
    packages=find_packages(),
    include_package_data=True,
    install_requires=install_requires,
    entry_points=entry_points,

    # PyPI metadata
    author="IBM SOAR",
    description="Framework used to run IBM SOAR Apps and Integrations.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/ibmresilient/resilient-python-api/tree/master/resilient-circuits",
    project_urls={
        "Documentation": "https://ibm.biz/soar-docs",
        "API Docs": "https://ibm.biz/soar-python-docs",
        "IBM Community": "https://ibm.biz/soarcommunity",
        "Change Log": "https://ibm.biz/resilient-circuits-changes"
    },
    classifiers=[
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.6"
    ],
    keywords="ibm soar resilient circuits resilient-circuits"
)
| resilient-circuits/setup.py | 2,059 | setup.py for resilient-circuits Python module
!/usr/bin/env python -*- coding: utf-8 -*- (c) Copyright IBM Corp. 2010, 2021. All Rights Reserved. Runtime Dependencies PyPI metadata | 182 | en | 0.435333 |
import pyglet
class Resources:
    """Loads and serves the game's image assets and drives the player run animation."""

    # --- Player Parameters ---
    player_animation_started = False
    player_images = []
    player_animation_time = 1. / 9.
    player_animation_index = 0

    # --- Obstacle Parameters ---
    obstacle_images = []

    # --- Player Methods ---

    # loads the player and obstacle images exactly once, then makes sure
    # the running animation clock is ticking
    @staticmethod
    def load_images():
        if not Resources.player_images:
            for image_path in ("res/dinosaur_left.png",
                               "res/dinosaur_right.png",
                               "res/dinosaur_normal.png"):
                Resources.player_images.append(pyglet.image.load(image_path))
        if not Resources.obstacle_images:
            for image_path in ("res/cactus_small.png", "res/cactus_big.png"):
                Resources.obstacle_images.append(pyglet.image.load(image_path))
        Resources.start_player_animation()

    # starts the recurring clock callback that flips the run frame index
    @staticmethod
    def start_player_animation():
        if Resources.player_animation_started:
            return
        pyglet.clock.schedule_interval(Resources.trigger_player_update, Resources.player_animation_time)
        Resources.player_animation_started = True

    # toggles the run frame index between 0 and 1
    @staticmethod
    def trigger_player_update(_):
        Resources.player_animation_index ^= 1

    # returns the current frame for the running player
    @staticmethod
    def player_running_image():
        return Resources.player_images[Resources.player_animation_index]

    # returns the frame for the jumping player
    @staticmethod
    def player_jumping_image():
        return Resources.player_images[2]
| src/dinosaur/game/resources.py | 1,883 | --- Player Parameters --- --- Obstacle Parameters --- --- Player Methods --- loads the images needed for the player animation if they haven't been loaded already starts the player's running animation by scheduling recurring updates to the player's image index updates the player's image index returns the current image for the running player returns the image for the jumping player | 382 | en | 0.921788 |
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The unique worker functionality for this service is contained here.
The entry-point is the **cb_nr_subscription_handler**
The design and flow leverage a few constraints that are placed upon it
by NATS Streaming and using AWAIT on the default loop.
- NATS streaming queues require one message to be processed at a time.
- AWAIT on the default loop effectively runs synchronously
If these constraints change, the use of Flask-SQLAlchemy would need to change.
Flask-SQLAlchemy currently allows the base model to be changed, or reworking
the model to a standalone SQLAlchemy usage with an async engine would need
to be pursued.
"""
import json
import os
from typing import Dict
import nats
from auth_api.models import Affiliation as AffiliationModel
from auth_api.models import Entity as EntityModel
from auth_api.models import Org as OrgModel
from auth_api.models import db
from auth_api.services.rest_service import RestService
from auth_api.utils.enums import CorpType
from dateutil import parser
from entity_queue_common.service import QueueServiceManager
from entity_queue_common.service_utils import QueueException, logger
from flask import Flask # pylint: disable=wrong-import-order
from business_events_listener import config
async def cb_nr_subscription_handler(msg: nats.aio.client.Msg):
    """Use Callback to process Queue Msg objects.

    Any exception is logged and swallowed so the message is always removed
    from the queue.
    """
    # Defined before the try block so the except handler can log it safely
    # even when decoding/parsing itself fails (previously this raised a
    # NameError inside the handler and masked the original error).
    event_message = None
    try:
        logger.info('Received raw message seq:%s, data= %s', msg.sequence, msg.data.decode())
        event_message = json.loads(msg.data.decode('utf-8'))
        logger.debug('Event Message Received: %s', event_message)
        await process_event(event_message, FLASK_APP)
    except Exception: # noqa pylint: disable=broad-except
        # Catch Exception so that any error is still caught and the message is removed from the queue
        logger.error('Queue Error: %s', json.dumps(event_message), exc_info=True)
async def process_event(event_message, flask_app):
    """Render the org status.

    Dispatches name events to :func:`process_name_events`; every other
    event type is ignored.
    """
    if not flask_app:
        raise QueueException('Flask App not available.')

    with flask_app.app_context():
        if event_message.get('type', None) == 'bc.registry.names.events':
            await process_name_events(event_message)
async def process_name_events(event_message: Dict[str, any]):
    """Process name events.

    1. Check if the NR already exists in entities table, if yes apply changes. If not create entity record.
    2. Check if new status is DRAFT, if yes call pay-api and get the account details for the payments against the NR.
    3. If an account is found, affiliate to that account.

    Args:
        event_message (object): cloud event message, sample below.
            {
                'specversion': '1.0.1',
                'type': 'bc.registry.names.events',
                'source': '/requests/6724165',
                'id': id,
                'time': '',
                'datacontenttype': 'application/json',
                'identifier': '781020202',
                'data': {
                    'request': {
                        'nrNum': 'NR 5659951',
                        'newState': 'APPROVED',
                        'previousState': 'DRAFT'
                    }
                }
            }
    """
    logger.debug('>>>>>>>process_name_events>>>>>')
    request_data = event_message.get('data').get('request')
    nr_number = request_data['nrNum']
    nr_status = request_data['newState']
    nr_entity = EntityModel.find_by_business_identifier(nr_number)
    if nr_entity is None:
        # BUGFIX: the previous literal 'Entity doesn''t exist...' was two
        # implicitly-concatenated strings and logged "Entity doesnt exist".
        logger.info("Entity doesn't exist, creating a new entity.")
        nr_entity = EntityModel(
            business_identifier=nr_number,
            corp_type_code=CorpType.NR.value
        )

    nr_entity.status = nr_status
    nr_entity.name = request_data.get('name', '')  # its not part of event now, this is to handle if they include it.
    nr_entity.last_modified_by = None  # TODO not present in event message.
    nr_entity.last_modified = parser.parse(event_message.get('time'))

    # Only affiliate on DRAFT when no affiliation exists yet for this NR.
    if nr_status == 'DRAFT' and AffiliationModel.find_affiliations_by_business_identifier(nr_number) is None:
        logger.info('Status is DRAFT, getting invoices for account')
        # Find account details for the NR.
        invoices = RestService.get(
            f'{APP_CONFIG.PAY_API_URL}/payment-requests?businessIdentifier={nr_number}',
            token=RestService.get_service_account_token()
        ).json()

        # Ideally there should be only one or two (priority fees) payment request for the NR.
        if invoices and (auth_account_id := invoices['invoices'][0].get('paymentAccount').get('accountId')) \
                and str(auth_account_id).isnumeric():
            logger.info('Account ID received : %s', auth_account_id)
            # Auth account id can be service account value too, so doing a query lookup than find_by_id
            org: OrgModel = db.session.query(OrgModel).filter(OrgModel.id == auth_account_id).one_or_none()
            if org:
                nr_entity.pass_code_claimed = True
                # Create an affiliation.
                logger.info('Creating affiliation between Entity : %s and Org : %s', nr_entity, org)
                affiliation: AffiliationModel = AffiliationModel(entity=nr_entity, org=org)
                affiliation.flush()

    nr_entity.save()
    logger.debug('<<<<<<<process_name_events<<<<<<<<<<')
# NOTE(review): qsm appears unused within this module — presumably the
# queue bootstrap imports it to register subscriptions; confirm.
qsm = QueueServiceManager() # pylint: disable=invalid-name
# Configuration profile is chosen via DEPLOYMENT_ENV, defaulting to production.
APP_CONFIG = config.get_named_config(os.getenv('DEPLOYMENT_ENV', 'production'))
# Standalone Flask app: supplies the application context that the
# Flask-SQLAlchemy models used in process_name_events require.
FLASK_APP = Flask(__name__)
FLASK_APP.config.from_object(APP_CONFIG)
db.init_app(FLASK_APP)
| queue_services/business-events-listener/src/business_events_listener/worker.py | 6,269 | The unique worker functionality for this service is contained here.
The entry-point is the **cb_nr_subscription_handler**
The design and flow leverage a few constraints that are placed upon it
by NATS Streaming and using AWAIT on the default loop.
- NATS streaming queues require one message to be processed at a time.
- AWAIT on the default loop effectively runs synchronously
If these constraints change, the use of Flask-SQLAlchemy would need to change.
Flask-SQLAlchemy currently allows the base model to be changed, or reworking
the model to a standalone SQLAlchemy usage with an async engine would need
to be pursued.
Copyright © 2019 Province of British Columbia Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. pylint: disable=wrong-import-order noqa pylint: disable=broad-except Catch Exception so that any error is still caught and the message is removed from the queue its not part of event now, this is to handle if they include it. TODO not present in event message. Find account details for the NR. Ideally there should be only one or two (priority fees) payment request for the NR. Auth account id can be service account value too, so doing a query lookup than find_by_id Create an affiliation. pylint: disable=invalid-name | 1,716 | en | 0.883344 |
##############################################################################
# Copyright 2019 Parker Berberian and Others #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
##############################################################################
from st2tests.base import BaseActionTestCase
from actions.actions import get_task_list
import json
class GetTaskListTestCase(BaseActionTestCase):
    """Unit tests for the get_task_list Task_List_Action."""

    action_cls = get_task_list.Task_List_Action

    def setUp(self):
        super(GetTaskListTestCase, self).setUp()
        self.action = self.get_action_instance()

    def _store_job(self, tasks):
        # Seed the datastore with the serialized task mapping for job 1.
        self.action.action_service.set_value(
            "job_1", json.dumps(tasks), local=False)

    def test_tasklist_multiple_tasks(self):
        self._store_job({
            "access": {
                "task1": "asdf",
                "task2": "fdsa"
            }
        })
        result = self.action.run(job_id=1, type="access")
        self.assertEqual(set(result), set(["task1", "task2"]))

    def test_tasklist_single_task(self):
        self._store_job({
            "access": {"task1": "asdf"},
            "hardware": {"task10": "asdf"}
        })
        result = self.action.run(job_id=1, type="hardware")
        self.assertEqual(set(result), set(["task10"]))

    def test_empty_tasklist(self):
        self._store_job({
            "access": {"task1": "asdf"},
            "hardware": {"task10": "asdf"}
        })
        result = self.action.run(job_id=1, type="unknown")
        self.assertFalse(result)
| laas/tests/test_action_get_task_list.py | 2,499 | Copyright 2019 Parker Berberian and Others Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 942 | en | 0.865261 |
from datetime import date
import pytest
from dateutil.parser import parse as dt_parse
from freezegun import freeze_time
from app.models.alert_date import AlertDate
def test_AlertDate_properties():
    alerts_date = AlertDate(dt_parse('2021-03-02T10:30:00Z'))

    # Every derived representation of the same GMT instant.
    expected_properties = {
        'as_lang': 'at 10:30am on Tuesday 2 March 2021',
        'as_iso8601': '2021-03-02T10:30:00+00:00',
        'as_utc_datetime': dt_parse('2021-03-02T10:30:00Z'),
        'as_local_datetime': dt_parse('2021-03-02T10:30:00Z'),
        'as_url': '2-mar-2021',
    }
    for name, value in expected_properties.items():
        assert getattr(alerts_date, name) == value
def test_AlertDate_properties_work_with_bst():
    alerts_date = AlertDate(dt_parse('2021-04-20T23:30:00Z'))

    # 23:30 UTC on 20 April is 00:30 BST on 21 April.
    expected_properties = {
        'as_lang': 'at 12:30am on Wednesday 21 April 2021',
        'as_iso8601': '2021-04-21T00:30:00+01:00',
        'as_utc_datetime': dt_parse('2021-04-20T23:30:00Z'),
        'as_local_datetime': dt_parse('2021-04-21T00:30:00+01:00'),
        'as_local_date': date(2021, 4, 21),
        'as_url': '21-apr-2021',
    }
    for name, value in expected_properties.items():
        assert getattr(alerts_date, name) == value
@pytest.mark.parametrize('hour, minute, expected_lang', (
    ('00', '00', 'at midnight on Sunday 21 March 2021'),
    ('12', '00', 'at midday on Sunday 21 March 2021'),
    ('23', '59', 'at 11:59pm on Sunday 21 March 2021'),  # 12 hour clock
))
def test_AlertDate_at_midday_and_midnight(hour, minute, expected_lang):
    # Midnight/midday get special wording rather than numeric times.
    when = dt_parse(f'2021-03-21T{hour}:{minute}:00Z')
    assert AlertDate(when).as_lang == expected_lang
@pytest.mark.parametrize('now, sample, expected_is_today', (
    # GMT
    ('2021-01-01T00:00:00Z', '2021-12-31T23:59:59Z', False),
    ('2021-01-01T00:00:00Z', '2021-01-01T00:00:00Z', True),
    ('2021-01-01T23:59:59Z', '2021-01-01T00:00:00Z', True),
    ('2021-01-01T00:00:00Z', '2021-01-01T23:59:59Z', True),
    ('2021-01-01T23:59:59Z', '2021-01-01T23:59:59Z', True),
    ('2021-01-01T23:59:59Z', '2021-01-02T00:00:00Z', False),
    # BST
    ('2021-05-31T23:00:00Z', '2021-05-31T22:59:59Z', False),
    ('2021-05-31T23:00:00Z', '2021-05-31T23:00:00Z', True),
    ('2021-06-01T22:59:59Z', '2021-05-31T23:00:00Z', True),
    ('2021-05-31T23:00:00Z', '2021-06-01T22:59:59Z', True),
    ('2021-06-01T22:59:59Z', '2021-06-01T22:59:59Z', True),
    ('2021-06-01T22:59:59Z', '2021-06-01T23:00:00Z', False),
))
def test_AlertDate_is_today(now, sample, expected_is_today):
    # Pin "now" so is_today is evaluated against a known wall clock.
    with freeze_time(now):
        observed = AlertDate(dt_parse(sample)).is_today
    assert observed == expected_is_today
| tests/app/models/test_alert_date.py | 2,635 | 12 hour clock GMT BST | 21 | en | 0.359164 |
# Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This template creates an external load balancer. """
import copy
from hashlib import sha1
import json
def set_optional_property(destination, source, prop_name):
    """ Copies the property value if present. """
    # EAFP: absent keys are simply skipped.
    try:
        destination[prop_name] = source[prop_name]
    except KeyError:
        pass
def get_backend_service(properties, backend_spec, res_name, project_id):
    """ Creates the backend service.

    Args:
        properties: the load balancer template properties.
        backend_spec: the spec of this particular backend service.
        res_name: the default Deployment Manager resource name.
        project_id: the project to create the backend service in.

    Returns:
        A (resources, outputs) tuple for the backend service.
    """
    name = backend_spec.get('resourceName', res_name)
    backend_name = backend_spec.get('name', name)

    backend_properties = {
        'name': backend_name,
        'project': project_id,
        'loadBalancingScheme': 'EXTERNAL',
        'protocol': get_protocol(properties),
    }

    backend_resource = {
        'name': name,
        'type': 'backend_service.py',
        'properties': backend_properties
    }

    # Pass-through properties copied verbatim when the spec provides them.
    # Fixed: 'backends' was previously listed twice (a redundant duplicate).
    optional_properties = [
        'description',
        'backends',
        'timeoutSec',
        'sessionAffinity',
        'connectionDraining',
        'healthCheck',
        'healthChecks',
        'portName',
        'enableCDN',
        'affinityCookieTtlSec'
    ]

    for prop in optional_properties:
        set_optional_property(backend_properties, backend_spec, prop)

    return [backend_resource], [
        {
            'name': 'backendServiceName',
            'value': backend_name,
        },
        {
            'name': 'backendServiceSelfLink',
            'value': '$(ref.{}.selfLink)'.format(name),
        },
    ]
def get_forwarding_rule(properties, target, res_name, project_id):
    """ Creates the forwarding rule. """
    name = '{}-forwarding-rule'.format(res_name)
    target_name = target['name']

    rule_properties = {
        'name': properties.get('name', res_name),
        'project': project_id,
        'loadBalancingScheme': 'EXTERNAL',
        'target': '$(ref.{}.selfLink)'.format(target_name),
        'IPProtocol': 'TCP',
    }

    # Copy user-supplied settings straight onto the rule when present.
    for prop in ('description', 'IPAddress', 'ipVersion', 'portRange'):
        set_optional_property(rule_properties, properties, prop)

    rule_resource = {
        'name': name,
        'type': 'forwarding_rule.py',
        'properties': rule_properties,
        'metadata': {
            'dependsOn': [target_name],
        },
    }

    outputs = [
        {
            'name': 'forwardingRuleName',
            'value': rule_properties['name'],
        },
        {
            'name': 'forwardingRuleSelfLink',
            'value': '$(ref.{}.selfLink)'.format(name),
        },
        {
            'name': 'IPAddress',
            'value': '$(ref.{}.IPAddress)'.format(name),
        },
    ]

    return [rule_resource], outputs
def get_backend_services(properties, res_name, project_id):
    """ Creates all backend services to be used by the load balancer. """
    resources = []
    outputs_by_name = {
        'backendServiceName': [],
        'backendServiceSelfLink': []
    }

    for backend_spec in properties['backendServices']:
        # A content digest keeps generated child resource names unique.
        digest = sha1(json.dumps(backend_spec).encode('utf-8')).hexdigest()[:10]
        child_name = '{}-backend-service-{}'.format(res_name, digest)
        child_resources, child_outputs = get_backend_service(
            properties, backend_spec, child_name, project_id)
        resources += child_resources
        # Merge outputs with the same name.
        for output in child_outputs:
            outputs_by_name[output['name']].append(output['value'])

    outputs = [
        {'name': key + 's', 'value': values}
        for key, values in outputs_by_name.items()
    ]
    return resources, outputs
def get_ref(name, prop='selfLink'):
    """ Creates reference to a property of a given resource. """
    # Deployment Manager reference syntax: $(ref.<resource>.<property>).
    template = '$(ref.{}.{})'
    return template.format(name, prop)
def update_refs_recursively(properties):
    """ Replaces service names with the service selflinks recursively. """
    for key, value in properties.items():
        if key in ('defaultService', 'service'):
            # Plain names (no '.' or '/') become $(ref...) links; values
            # that already look like links or URLs are left untouched.
            if '.' not in value and '/' not in value:
                properties[key] = get_ref(value)
        elif isinstance(value, dict):
            update_refs_recursively(value)
        elif isinstance(value, list):
            for entry in value:
                if isinstance(entry, dict):
                    update_refs_recursively(entry)
def get_url_map(properties, res_name, project_id):
    """ Creates a UrlMap resource. """
    url_map_properties = copy.deepcopy(properties)
    url_map_properties['project'] = project_id
    url_map_properties['name'] = properties.get('name', res_name)

    # Backend service names inside the map become references to self links.
    update_refs_recursively(url_map_properties)

    resource = {
        'name': res_name,
        'type': 'url_map.py',
        'properties': url_map_properties,
    }

    self_link = '$(ref.{}.selfLink)'.format(res_name)
    outputs = [
        {
            'name': 'urlMapName',
            'value': '$(ref.{}.name)'.format(res_name)
        },
        {
            'name': 'urlMapSelfLink',
            'value': self_link
        }
    ]

    return self_link, [resource], outputs
def get_target_proxy(properties, res_name, project_id, bs_resources):
    """ Creates a target proxy resource.

    For HTTP(S) protocols the proxy targets a UrlMap (also created here);
    for TCP/SSL it targets the first backend service directly.

    Args:
        properties: the load balancer template properties.
        res_name: the base Deployment Manager resource name.
        project_id: the project to create the proxy in.
        bs_resources: backend service resources created for this LB.

    Returns:
        A (resources, outputs) tuple for the proxy and any UrlMap.
    """
    protocol = get_protocol(properties)

    depends = []
    if 'HTTP' in protocol:
        urlMap = copy.deepcopy(properties['urlMap'])
        if 'name' not in urlMap and 'name' in properties:
            urlMap['name'] = '{}-url-map'.format(properties['name'])
        target, resources, outputs = get_url_map(
            urlMap,
            '{}-url-map'.format(res_name),
            project_id
        )
        depends.append(resources[0]['name'])
    else:
        depends.append(bs_resources[0]['name'])
        target = get_ref(bs_resources[0]['name'])
        resources = []
        outputs = []

    name = '{}-target'.format(res_name)
    proxy = {
        'name': name,
        'type': 'target_proxy.py',
        'properties': {
            'name': '{}-target'.format(properties.get('name', res_name)),
            'project': project_id,
            'protocol': protocol,
            'target': target,
        },
        'metadata': {
            # BUGFIX: dependsOn must be a flat list of resource names; the
            # previous '[depends]' nested the list one level too deep.
            'dependsOn': depends,
        },
    }

    for prop in ['proxyHeader', 'quicOverride']:
        set_optional_property(proxy['properties'], properties, prop)

    outputs.extend(
        [
            {
                'name': 'targetProxyName',
                'value': '$(ref.{}.name)'.format(name)
            },
            {
                'name': 'targetProxySelfLink',
                'value': '$(ref.{}.selfLink)'.format(name)
            },
            {
                'name': 'targetProxyKind',
                'value': '$(ref.{}.kind)'.format(name)
            }
        ]
    )

    if 'ssl' in properties:
        ssl_spec = properties['ssl']
        proxy['properties']['ssl'] = ssl_spec
        # Certificate outputs exist only when the certificate is created
        # here rather than referenced by URL.
        creates_new_certificate = not 'url' in ssl_spec['certificate']
        if creates_new_certificate:
            outputs.extend(
                [
                    {
                        'name': 'certificateName',
                        'value': '$(ref.{}.certificateName)'.format(name)
                    },
                    {
                        'name': 'certificateSelfLink',
                        'value': '$(ref.{}.certificateSelfLink)'.format(name)
                    }
                ]
            )

    return [proxy] + resources, outputs
def get_protocol(properties):
    """ Finds what network protocol to use. """
    has_url_map = 'urlMap' in properties
    has_ssl = 'ssl' in properties

    # (web, secure) -> protocol lookup table.
    return {
        (True, True): 'HTTPS',
        (True, False): 'HTTP',
        (False, True): 'SSL',
        (False, False): 'TCP',
    }[(has_url_map, has_ssl)]
def generate_config(context):
    """ Entry point for the deployment resources. """
    properties = context.properties
    project_id = properties.get('project', context.env['project'])
    base_name = context.env['name']

    # An external LB = backend service(s) + target proxy + forwarding rule.
    backend_resources, backend_outputs = get_backend_services(
        properties, base_name, project_id)
    proxy_resources, proxy_outputs = get_target_proxy(
        properties, base_name, project_id, backend_resources)
    rule_resources, rule_outputs = get_forwarding_rule(
        properties,
        proxy_resources[0],
        base_name,
        project_id
    )

    return {
        'resources': backend_resources + proxy_resources + rule_resources,
        'outputs': backend_outputs + proxy_outputs + rule_outputs,
    }
| dm/templates/external_load_balancer/external_load_balancer.py | 9,153 | Entry point for the deployment resources.
Creates the backend service.
Creates all backend services to be used by the load balancer.
Creates the forwarding rule.
Finds what network protocol to use.
Creates reference to a property of a given resource.
Creates a target proxy resource.
Creates a UrlMap resource.
Copies the property value if present.
Replaces service names with the service selflinks recursively.
This template creates an external load balancer.
Copyright 2018 Google Inc. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Merge outputs with the same name. Forwarding rule + target proxy + backend service = ELB | 1,132 | en | 0.826715 |
# Demonstration of Python exception handling: a try/except block lets the
# program recover from an error instead of crashing.
try:
    print(a)  # raises NameError: the name `a` was never defined
except:  # bare except catches every exception type (too broad for real code; shown for demo)
    print("a is not defined!")
# except clauses can target specific exception types; the first matching
# clause handles the error, and a trailing bare except acts as a catch-all.
try:
    print(a)  # raises NameError again, caught by the clause below
except NameError:
    print("a is still not defined")
except:
    print("Something else went wrong.")
# The final unguarded print(a) below has no try/except around it, so the
# NameError propagates and terminates the script -- that is the point.
print(a) | exceptions.py | 403 | python exceptions let you deal withunexpected resultsthis will throw an exception since a is not foundthere are specific errors in pythonthis will throw a NameErrorthis will break our programsince a is not defined | 213 | en | 0.879358 |
"""
.. autoclass:: ppci.arch.arch.Architecture
:members:
.. autoclass:: ppci.arch.arch_info.ArchInfo
:members:
.. autoclass:: ppci.arch.arch.Frame
:members:
.. autoclass:: ppci.arch.isa.Isa
:members:
.. autoclass:: ppci.arch.registers.Register
:members: is_colored
.. autoclass:: ppci.arch.encoding.Instruction
:members:
"""
import sys
import platform
from .arch import Architecture, Frame
from .isa import Isa
def get_current_arch():
    """Return an architecture instance matching the machine we run on.

    Falls through (returning ``None``) when the current platform is not
    one of the recognized windows/linux/darwin 64-bit configurations.
    """
    plat = sys.platform
    if plat.startswith("win"):
        if platform.machine() == "AMD64":
            return get_arch("x86_64:wincc")
    elif plat in ("linux", "darwin") and platform.architecture()[0] == "64bit":
        return get_arch("x86_64")
def get_arch(arch):
    """Resolve *arch* into an architecture instance.

    Args:
        arch: either an :class:`Architecture` object (returned unchanged)
            or a string of the form ``name`` or ``name:option1:option2``.

    Raises:
        ValueError: when *arch* is neither an instance nor a string.

    .. doctest::

        >>> from ppci.api import get_arch
        >>> arch = get_arch('msp430')
        >>> arch
        msp430-arch
        >>> type(arch)
        <class 'ppci.arch.msp430.arch.Msp430Arch'>
    """
    if isinstance(arch, Architecture):
        return arch
    if isinstance(arch, str):
        # Horrific import cycle created. TODO: restructure this
        from .target_list import create_arch

        if ":" in arch:
            # Target name with options attached after colons.
            name, *opts = arch.split(":")
            return create_arch(name, options=tuple(opts))
        return create_arch(arch)
    raise ValueError("Invalid architecture {}".format(arch))
__all__ = ["Architecture", "Frame", "Isa", "get_arch", "get_current_arch"]
| ppci/arch/__init__.py | 1,755 | Try to return an architecture instance.
Args:
arch: can be a string in the form of arch:option1:option2
.. doctest::
>>> from ppci.api import get_arch
>>> arch = get_arch('msp430')
>>> arch
msp430-arch
>>> type(arch)
<class 'ppci.arch.msp430.arch.Msp430Arch'>
Try to get the architecture for the current platform
.. autoclass:: ppci.arch.arch.Architecture
:members:
.. autoclass:: ppci.arch.arch_info.ArchInfo
:members:
.. autoclass:: ppci.arch.arch.Frame
:members:
.. autoclass:: ppci.arch.isa.Isa
:members:
.. autoclass:: ppci.arch.registers.Register
:members: is_colored
.. autoclass:: ppci.arch.encoding.Instruction
:members:
Horrific import cycle created. TODO: restructure this We have target with options attached | 786 | en | 0.495587 |
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import petstore_api
from petstore_api.model.composed_bool import ComposedBool
class TestComposedBool(unittest.TestCase):
    """Unit-test stubs for the ComposedBool model."""

    def setUp(self):
        """No fixtures required yet."""

    def tearDown(self):
        """Nothing to clean up."""

    def test_ComposedBool(self):
        """Placeholder test for ComposedBool."""
        # FIXME: construct object with mandatory attributes with example values
        # model = ComposedBool() # noqa: E501
if __name__ == '__main__':
unittest.main()
| samples/openapi3/client/petstore/python-experimental/test/test_composed_bool.py | 839 | ComposedBool unit test stubs
Test ComposedBool
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: " \ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
coding: utf-8 FIXME: construct object with mandatory attributes with example values model = ComposedBool() noqa: E501 | 444 | en | 0.798526 |
from warnings import warn
from functools import partial
from tqdm import tqdm
import torch
import numpy as np
from torch.optim import Adam
from torch.nn import MSELoss
from odl.contrib.torch import OperatorModule
from dival.reconstructors import IterativeReconstructor
from dival.reconstructors.networks.unet import UNet
from dival.util.torch_losses import poisson_loss, tv_loss
from dival.util.constants import MU_MAX
MIN = -1000
MAX = 1000
class DeepImagePriorCTReconstructor(IterativeReconstructor):
    """
    CT reconstructor applying DIP with TV regularization (see [2]_).
    The DIP was introduced in [1]_.

    References
    ----------
    .. [1] V. Lempitsky, A. Vedaldi, and D. Ulyanov, 2018, "Deep Image Prior".
           IEEE/CVF Conference on Computer Vision and Pattern Recognition.
           https://doi.org/10.1109/CVPR.2018.00984
    .. [2] D. Otero Baguer, J. Leuschner, M. Schmidt, 2020, "Computed
           Tomography Reconstruction Using Deep Image Prior and Learned
           Reconstruction Methods". Inverse Problems.
           https://doi.org/10.1088/1361-6420/aba415
    """

    HYPER_PARAMS = {
        'lr':
            {'default': 1e-3,
             'range': [1e-5, 1e-1]},
        'gamma':
            {'default': 1e-4,
             'range': [1e-7, 1e-0],
             'grid_search_options': {'num_samples': 20}},
        'scales':
            {'default': 4,
             'choices': [3, 4, 5, 6, 7]},
        'channels':
            {'default': [128] * 5},
        'skip_channels':
            {'default': [4] * 5},
        'iterations':
            {'default': 5000,
             'range': [1, 50000]},
        'loss_function':
            {'default': 'mse',
             'choices': ['mse', 'poisson']},
        'photons_per_pixel':  # used by 'poisson' loss function
            {'default': 4096,
             'range': [1000, 10000]},
        'mu_max':  # used by 'poisson' loss function
            {'default': MU_MAX,
             'range': [1., 10000.]}
    }

    def __init__(self, ray_trafo, callback_func=None,
                 callback_func_interval=100, show_pbar=True,
                 torch_manual_seed=10, **kwargs):
        """
        Parameters
        ----------
        ray_trafo : `odl.tomo.operators.RayTransform`
            The forward operator
        callback_func : callable, optional
            Callable with signature
            ``callback_func(iteration, reconstruction, loss)`` that is called
            after every `callback_func_interval` iterations, starting
            after the first iteration. It is additionally called after the
            last iteration.
            Note that it differs from the inherited
            `IterativeReconstructor.callback` (which is also supported) in that
            the latter is of type :class:`odl.solvers.util.callback.Callback`,
            which only receives the reconstruction, such that the loss would
            have to be recomputed.
        callback_func_interval : int, optional
            Number of iterations between calls to `callback_func`.
            Default: `100`.
        show_pbar : bool, optional
            Whether to show a tqdm progress bar during reconstruction.
        torch_manual_seed : int, optional
            Fixed seed to set by ``torch.manual_seed`` before reconstruction.
            The default is `10`. It can be set to `None` or `False` to disable
            the manual seed.
        """
        super().__init__(
            reco_space=ray_trafo.domain, observation_space=ray_trafo.range,
            **kwargs)
        # Fixed: `callback_func` was previously assigned twice; once is enough.
        self.callback_func = callback_func
        self.ray_trafo = ray_trafo
        # Torch module wrapping the ODL forward operator, so it can take
        # part in autograd during the optimization loop.
        self.ray_trafo_module = OperatorModule(self.ray_trafo)
        self.callback_func_interval = callback_func_interval
        self.show_pbar = show_pbar
        self.torch_manual_seed = torch_manual_seed

    def get_activation(self, layer_index):
        """Return the output of network layer `layer_index` for the fixed
        net input.

        NOTE(review): ``self.model`` and ``self.net_input`` are only created
        inside :meth:`_reconstruct`, so this is only valid after a
        reconstruction has been run.
        """
        return self.model.layer_output(self.net_input, layer_index)

    def _reconstruct(self, observation, *args, **kwargs):
        """Run the DIP optimization for `observation` and return the best
        (lowest-loss) reconstruction found."""
        if self.torch_manual_seed:
            torch.random.manual_seed(self.torch_manual_seed)

        output_depth = 1
        input_depth = 1
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        # Fixed random input of the DIP network (scaled standard normal).
        self.net_input = 0.1 * \
            torch.randn(input_depth, *self.reco_space.shape)[None].to(device)
        self.model = UNet(
            input_depth,
            output_depth,
            channels=self.channels[:self.scales],
            skip_channels=self.skip_channels[:self.scales],
            use_sigmoid=True,
            use_norm=True).to(device)
        self.optimizer = Adam(self.model.parameters(), lr=self.lr)

        y_delta = torch.tensor(np.asarray(observation), dtype=torch.float32)
        y_delta = y_delta.view(1, 1, *y_delta.shape)
        y_delta = y_delta.to(device)

        if self.loss_function == 'mse':
            criterion = MSELoss()
        elif self.loss_function == 'poisson':
            criterion = partial(poisson_loss,
                                photons_per_pixel=self.photons_per_pixel,
                                mu_max=self.mu_max)
        else:
            warn('Unknown loss function, falling back to MSE')
            criterion = MSELoss()

        best_loss = np.inf
        best_output = self.model(self.net_input).detach()

        for i in tqdm(range(self.iterations),
                      desc='DIP', disable=not self.show_pbar):
            self.optimizer.zero_grad()
            output = self.model(self.net_input)
            # Data discrepancy in measurement space plus TV regularization
            # in image space, weighted by `gamma`.
            loss = criterion(self.ray_trafo_module(output),
                             y_delta) + self.gamma * tv_loss(output)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1)
            self.optimizer.step()
            # Clamp parameters to a fixed range for numerical stability.
            for p in self.model.parameters():
                p.data.clamp_(MIN, MAX)
            # Track the best output seen so far (early-stopping-like choice).
            if loss.item() < best_loss:
                best_loss = loss.item()
                best_output = output.detach()
            if (self.callback_func is not None and
                    (i % self.callback_func_interval == 0
                     or i == self.iterations-1)):
                self.callback_func(
                    iteration=i,
                    reconstruction=best_output[0, 0, ...].cpu().numpy(),
                    loss=best_loss)
            if self.callback is not None:
                self.callback(self.reco_space.element(
                    best_output[0, 0, ...].cpu().numpy()))

        return self.reco_space.element(best_output[0, 0, ...].cpu().numpy())
| dival/reconstructors/dip_ct_reconstructor.py | 6,654 | CT reconstructor applying DIP with TV regularization (see [2]_).
The DIP was introduced in [1]_.
References
----------
.. [1] V. Lempitsky, A. Vedaldi, and D. Ulyanov, 2018, "Deep Image Prior".
IEEE/CVF Conference on Computer Vision and Pattern Recognition.
https://doi.org/10.1109/CVPR.2018.00984
.. [2] D. Otero Baguer, J. Leuschner, M. Schmidt, 2020, "Computed
Tomography Reconstruction Using Deep Image Prior and Learned
Reconstruction Methods". Inverse Problems.
https://doi.org/10.1088/1361-6420/aba415
Parameters
----------
ray_trafo : `odl.tomo.operators.RayTransform`
The forward operator
callback_func : callable, optional
Callable with signature
``callback_func(iteration, reconstruction, loss)`` that is called
after every `callback_func_interval` iterations, starting
after the first iteration. It is additionally called after the
last iteration.
Note that it differs from the inherited
`IterativeReconstructor.callback` (which is also supported) in that
the latter is of type :class:`odl.solvers.util.callback.Callback`,
which only receives the reconstruction, such that the loss would
have to be recomputed.
callback_func_interval : int, optional
Number of iterations between calls to `callback_func`.
Default: `100`.
show_pbar : bool, optional
Whether to show a tqdm progress bar during reconstruction.
torch_manual_seed : int, optional
Fixed seed to set by ``torch.manual_seed`` before reconstruction.
The default is `10`. It can be set to `None` or `False` to disable
the manual seed.
used by 'poisson' loss function used by 'poisson' loss function | 1,672 | en | 0.782034 |
"""
PAIPASS Oauth2 backend
"""
import re
from .oauth import BaseOAuth2
from ..utils import handle_http_errors, url_add_parameters
from ..exceptions import AuthCanceled, AuthUnknownError
class PaipassOAuth2(BaseOAuth2):
    """PAIPASS OAuth2 authentication backend"""
    name = "paipass"
    # Users are identified by their e-mail address.
    ID_KEY = "email"
    REDIRECT_STATE = False
    STATE_PARAMETER = False
    ACCESS_TOKEN_METHOD = "POST"
    SCOPE_SEPARATOR = r" "
    AUTHORIZATION_URL = "https://api.demo.p19dev.com/oauth/authorize"
    ACCESS_TOKEN_URL = "https://api.demo.p19dev.com/oauth/token"
    USER_DATA_URL = "https://api.demo.p19dev.com/attributes/paipass/user.data/0"
    EXTRA_DATA = [("expires", "expires"), ]

    def auth_complete_credentials(self):
        """Return the (client key, secret) pair used to complete auth."""
        return self.get_key_and_secret()

    def get_user_details(self, response):
        """Return user details from the PAIPASS account response.

        The username is derived from the local part of the e-mail address.
        """
        email = response.get("email")
        return {"email": email, "username": email.split("@")[0]}

    def user_data(self, access_token, *args, **kwargs):
        """Loads user data from service"""
        params = self.setting("PROFILE_EXTRA_PARAMS", {})
        response = kwargs.get('response') or {}
        params["access_token"] = access_token
        # Token type defaults to "Bearer" when the token response omits it.
        headers = {
            "Authorization": "%s %s" % (
                response.get("token_type", "Bearer").capitalize(),
                access_token),
            "Accept": 'application/json',
            "Content-type": 'application/json;charset=utf-8'}
        return self.get_json(self.USER_DATA_URL,
                             params=params, headers=headers)

    def auth_params(self, state=None):
        """Return auth params with default ports (:80/:443) stripped from
        the redirect URI."""
        params = super(PaipassOAuth2, self).auth_params(state)
        regex = re.compile(r"\:(80|443)\/")
        params["redirect_uri"] = regex.sub("/", params["redirect_uri"])
        return params

    def get_redirect_uri(self, state=None):
        """Build redirect with redirect_state parameter."""
        # Strip explicit default ports, mirroring auth_params() above.
        regex = re.compile(r"\:(80|443)\/")
        uri = regex.sub("/", self.redirect_uri)
        if self.REDIRECT_STATE and state:
            uri = url_add_parameters(uri, {'redirect_state': state})
        return uri

    @handle_http_errors
    def do_auth(self, access_token, *args, **kwargs):
        """Finish the auth process once the access_token was retrieved"""
        data = self.user_data(access_token, *args, **kwargs)
        response = kwargs.get('response') or {}
        response.update(data or {})
        if 'access_token' not in response:
            response['access_token'] = access_token
        kwargs.update({'response': response, 'backend': self})
        return self.strategy.authenticate(*args, **kwargs)
| social_core/backends/paipass.py | 2,684 | Facebook OAuth2 authentication backend
Finish the auth process once the access_token was retrieved
Build redirect with redirect_state parameter.
Return user details from Facebook account
Loads user data from service
PAIPASS Oauth2 backend | 238 | en | 0.777786 |
"""ConnManagerMQTT containing script"""
import _thread
import time
import random
import logging
from .uconn_mqtt import UConnMQTT
from . import exceptions
class ConnManagerMQTT(object):
    """
    UconnMQTT wrapper that guarantees delivery to the addressee.

    Outgoing messages are prefixed with ``\\x01`` + a 2-byte message id and
    republished until a ``\\x02`` + id acknowledgement arrives; incoming
    data messages are acknowledged automatically.
    """
    _SENDER = 'sender'
    _DESTINATION = 'destination'
    _MESSAGE = 'message'

    def __init__(self):
        """
        Initialization of ConnManager
        """
        logging.info('Initializing ConnmanagerMQTT')
        self.__connection = UConnMQTT()
        # Fixed: upper bound must be 65535 -- randint is inclusive, and an
        # initial id of 65536 would overflow `.to_bytes(2, 'big')` on the
        # first publish (ids are wrapped mod 65536 everywhere else).
        self.__message_number = random.randint(0, 65535)
        # Unacknowledged messages, keyed by message id.
        self.__sent_messages = dict()
        self.__callback = None
        self.__callback_object = None

    def disconnect(self):
        """
        Disconnection from server
        """
        logging.info('Disconnecting...')
        self.__connection.disconnect()

    def subscribe(self, topic, callback_object, callback):
        """
        Subscribe on topic
        :param str topic: Topic for subscription
        :param callback_object: Object passed back to `callback`
        :param method callback: Callback for received message
        """
        logging.info("Subscribing for {0}".format(topic))
        if not callable(callback):
            raise exceptions.UtimUncallableCallbackError
        self.__callback = callback
        self.__callback_object = callback_object
        self.__connection.subscribe(topic, self, ConnManagerMQTT._on_message)

    def unsubscribe(self, topic):
        """
        Unsubscribe from topic
        :param str topic: Topic for subscription cancelling
        """
        logging.info("Unsubscribing from {0}".format(topic))
        self.__connection.unsubscribe(topic)

    def publish(self, sender, destination, message):
        """
        Publish message and start a republish thread until it is acked
        :param sender: Message sender
        :param destination: Message destination
        :param message: The message
        """
        msg_id = self.__message_number
        self.__message_number = (self.__message_number + 1) % 65536
        out_message = b'\x01' + msg_id.to_bytes(2, 'big') + message
        logging.info("Publishing {0} to topic {1}".format(message, destination))
        self.__connection.publish(sender, destination, out_message)
        self.__sent_messages[msg_id] = {self._SENDER: sender,
                                        self._DESTINATION: destination,
                                        self._MESSAGE: message}
        _thread.start_new_thread(self._republish, (msg_id,))

    def _republish(self, msg_id):
        """
        Check if message was delivered and republish if not
        :param msg_id: Message ID
        """
        logging.info("_publish for {0} started".format(msg_id))
        time.sleep(10)
        while msg_id in self.__sent_messages:
            try:
                logging.info("Message {0} wasn\'t delivered".format(msg_id))
                message = self.__sent_messages[msg_id]
                self.__connection.publish(message[self._SENDER], message[self._DESTINATION],
                                          b'\x01' + msg_id.to_bytes(2, 'big') + message[self._MESSAGE])
                time.sleep(5)
            except KeyError:
                # The ack arrived between the membership test and the lookup.
                logging.error("Message was already deleted from republish")
                break
        logging.info("Message {0} was delivered".format(msg_id))

    def _on_message(self, sender, message):
        """
        Message receiving callback
        :param sender: Message sender
        :param message: The message
        """
        logging.info("Received message {0} from {1}".format(message, sender))
        if len(message) < 3:
            # Shorter than type byte + 2-byte id: cannot be a valid frame.
            logging.info('Message is too short to be something!')
        else:
            if message[:1] == b'\x02':
                try:
                    logging.info('Received ack, deleting message from sent')
                    msg_id = int.from_bytes(message[1:3], 'big')
                    if msg_id in self.__sent_messages:
                        self.__sent_messages.pop(msg_id)
                except KeyError:
                    logging.error("Message was already deleted from republish")
            else:
                logging.info('Received message, sending ack...')
                ack_message = b'\x02' + message[1:3]
                self.__connection.publish(b'ack', sender.decode(), ack_message)
                # Strip the 3-byte header before handing the payload upward.
                self.__callback(self.__callback_object, sender, message[3:])
| utilities/connmanagermqtt.py | 4,343 | UconnMQTT wrapper that guarantee delivery to addressee
Initialization of ConnManager
Message receiving callback
:param sender: Message sender
:param message: The message
Check if message was delivered and republish if not
:param id: Message ID
Disconnection from server
Publish message
:param sender: Message sender
:param destination: Message destination
:param message: The message
Subscribe on topic
:param str topic: Topic for subscription
:param method callback: Callback for received message
Unsubscribe from topic
:param str topic: Topic for subscription cancelling
ConnManagerMQTT containing script | 611 | en | 0.618187 |
from typing import Tuple
import torch as th
import torch.nn as nn
from torchvision import transforms
from autoencoding_rl.latent_extractors.autoencoder.SimpleEncoder import SimpleEncoder
from autoencoding_rl.latent_extractors.autoencoder.SimpleDecoder import SimpleDecoder
from autoencoding_rl.utils import Transition
class DynAutoencoder(nn.Module):
    """Autoencoder that splits an observation into a 'static' and a 'dynamic'
    latent code and learns a dynamics model over the dynamic part.

    ``forward`` encodes an observation, advances the dynamic code by one
    action step, and decodes the predicted next observation plus a reward.
    """

    def __init__(self, observation_width: int,
                       observation_height: int,
                       observation_channels_num: int,
                       dyn_encoding_size: int,
                       static_encoding_size: int,
                       action_size: int,
                       dynamics_nn_arch: Tuple[int, int]):
        """Build encoders, dynamics MLP and decoder.

        :param observation_width: width of input/output images
        :param observation_height: height of input/output images
        :param observation_channels_num: number of image channels
        :param dyn_encoding_size: size of the dynamic latent vector
        :param static_encoding_size: size of the static latent vector
            (0 disables the static encoder entirely)
        :param action_size: size of the action vector
        :param dynamics_nn_arch: hidden-layer widths of the dynamics MLP
        """
        super().__init__()
        self._observation_height = observation_height
        self._observation_width = observation_width
        self._dyn_encoding_size = dyn_encoding_size
        self._static_encoding_size = static_encoding_size
        self._action_size = action_size
        self._observation_channels_num = observation_channels_num
        self._dynamics_nn_arch = dynamics_nn_arch

        self._dynEncoder = SimpleEncoder(encoding_size = self._dyn_encoding_size,
                                         image_channels_num = self._observation_channels_num,
                                         net_input_width = self._observation_width,
                                         net_input_height = self._observation_height)
        # A static encoder is only built when a static code is requested.
        if self._static_encoding_size != 0:
            self._staticEncoder = SimpleEncoder(encoding_size = self._static_encoding_size,
                                                image_channels_num = self._observation_channels_num,
                                                net_input_width = self._observation_width,
                                                net_input_height = self._observation_height)
        else:
            self._staticEncoder = None

        # MLP mapping (dynamic state, action) -> (next dynamic state, reward).
        self._dynamics_net = th.nn.Sequential( th.nn.Linear(self._dyn_encoding_size+self._action_size, self._dynamics_nn_arch[0]),
                                               th.nn.ReLU(),
                                               th.nn.Linear(self._dynamics_nn_arch[0], self._dynamics_nn_arch[1]),
                                               th.nn.ReLU(),
                                               th.nn.Linear(self._dynamics_nn_arch[1], self._dyn_encoding_size+1))

        self._decoder = SimpleDecoder(  encoding_size = self._dyn_encoding_size + self._static_encoding_size,
                                        image_channels_num = self._observation_channels_num,
                                        net_output_width = self._observation_width,
                                        net_output_height = self._observation_height)

        self._resizeToInput = transforms.Resize((self._observation_height,self._observation_width))

    @property
    def observation_height(self):
        # Height of the images the model consumes and produces.
        return self._observation_height

    @property
    def observation_width(self):
        # Width of the images the model consumes and produces.
        return self._observation_width

    @property
    def dyn_encoding_size(self):
        # Size of the dynamic latent vector.
        return self._dyn_encoding_size

    @property
    def static_encoding_size(self):
        # Size of the static latent vector (0 when disabled).
        return self._static_encoding_size

    @property
    def action_size(self):
        # Size of the action vector.
        return self._action_size

    def forward(self, transition_batch : Transition):
        """Predict the next observation and reward for a transition batch.

        Returns a tuple ``(observation_1_batch, reward_d_1_batch)``.
        """
        observation_batch = transition_batch.observation
        action_batch = transition_batch.action
        assert action_batch.size()[0] == observation_batch.size()[0], \
            f"Observation batch and action batch should have the same length. Action batch size = {action_batch.size()[0]}, observation batch size = {observation_batch.size()[0]}. Action tensor size = {action_batch.size()[0]}. Observation tensor size = {observation_batch.size()[0]}"
        assert observation_batch.size() == (observation_batch.size()[0], self._observation_channels_num, self._observation_height, self._observation_width), \
            f"Observation size should be (Any, {self._observation_channels_num}, {self._observation_height}, {self._observation_width}), instead it is {observation_batch.size()}"
        assert action_batch.size()[1] == self._action_size, \
            f"Each action should have size {self._action_size}, not {action_batch.size()[1]}. Tensor has size {action_batch.size()}"

        #Compute 'static' features encoding
        state_s_0_batch = self.encode_static(observation_batch) #Gives a (batch_size, static_encoding_size) output
        #Compute 'dynamic' features encoding
        state_d_0_batch = self.encode_dynamic(observation_batch) #Gives a (batch_size, dyn_encoding_size) output

        state_d_1_batch, reward_d_1_batch = self.predict_dynamics(state_d_0_batch, action_batch)
        #state_d_1_batch now has size (batch_size, dyn_encoding_size)
        #reward_d_1_batch now has size (batch_size, 1) (still 2-dimensional)

        #Will now use 'static' features vectors and predicted states to predict the observation
        observation_1_batch = self.decode(state_s_0_batch,state_d_1_batch) #Gives a (batch_size, observations_channels_num, observation_height, observation_width) output

        return observation_1_batch, reward_d_1_batch

    def encode_dynamic(self, observation_batch : th.Tensor):
        """Encode observations into dynamic latent vectors of size
        ``(batch, dyn_encoding_size)``."""
        assert observation_batch.size() == (observation_batch.size()[0], self._observation_channels_num, self._observation_height, self._observation_width), \
            f"Observation size should be (Any, {self._observation_channels_num}, {self._observation_height}, {self._observation_width}), instead it is {observation_batch.size()}"
        return self._dynEncoder(observation_batch)

    def encode_static(self, observation_batch : th.Tensor):
        """Encode observations into static latent vectors of size
        ``(batch, static_encoding_size)``; returns an empty (batch, 0)
        tensor when the static encoder is disabled."""
        assert observation_batch.size() == (observation_batch.size()[0], self._observation_channels_num, self._observation_height, self._observation_width), \
            f"Observation size should be (Any, {self._observation_channels_num}, {self._observation_height}, {self._observation_width}), instead it is {observation_batch.size()}"
        if self._staticEncoder is not None:
            return self._staticEncoder(observation_batch)
        else:
            return th.empty([observation_batch.size()[0],0]).to(observation_batch.device)

    def decode(self, static_encoding_batch : th.Tensor, dynamic_encoding_batch : th.Tensor):
        """Decode concatenated (static, dynamic) latent vectors back into
        an observation batch."""
        assert static_encoding_batch.size()[0] == dynamic_encoding_batch.size()[0], \
            f"static encoding batch and dynamic encoding batch have different sizes, respectively {static_encoding_batch.size()[0]} and {dynamic_encoding_batch.size()[0]}"
        assert dynamic_encoding_batch.size() == (dynamic_encoding_batch.size()[0], self._dyn_encoding_size), \
            f"dynamic_encoding have wrong size, should be {(dynamic_encoding_batch.size()[0], self._dyn_encoding_size)}, but it's {dynamic_encoding_batch.size()}"
        assert static_encoding_batch.size() == (static_encoding_batch.size()[0], self._static_encoding_size), \
            f"static_encoding_batch have wrong size, should be {(static_encoding_batch.size()[0], self._static_encoding_size)}, but it's {static_encoding_batch.size()}"
        #Combine the two vectors
        state_batch = th.cat((static_encoding_batch, dynamic_encoding_batch), 1) #Gives a (batch_size, dyn_encoding_size+static_encoding_size) output
        #Predict the observation
        return self._decoder(state_batch) #Gives a (batch_size, observations_channels_num, observation_height, observation_width) output

    def predict_dynamics(self, state_batch : th.Tensor, action_batch : th.Tensor):
        """Advance dynamic states one step: returns (next state batch,
        reward batch) predicted by the dynamics MLP."""
        assert state_batch.size()[0] == action_batch.size()[0], \
            f"state batch and action batch have different sizes, respectively {state_batch.size()[0]} and {action_batch.size()[0]}"
        assert state_batch.size()[1] == self._dyn_encoding_size, \
            f"States have wrong size, should be {self._dyn_encoding_size}, but it's {state_batch.size()[1]}"
        assert action_batch.size()[1] == self._action_size, \
            f"Actions have wrong size, should be {self._action_size} but it's {action_batch.size()[1]}"
        #Concatenate states and actions
        state_action_batch = th.cat((state_batch, action_batch), 1) #Gives a (batch_size, dyn_encoding_size+action_size) output
        nextstate_reward_batch = self._dynamics_net(state_action_batch) #Gives a (batch_size, dyn_encoding_size+1) output
        nextstate_batch, reward_batch = th.split(nextstate_reward_batch, [self._dyn_encoding_size, 1], 1)
        #nextstate_batch now has size (batch_size, dyn_encoding_size)
        #reward_batch now has size (batch_size, 1) (still 2-dimensional)
        return nextstate_batch, reward_batch

    def preprocess_observations(self, observation_batch : th.Tensor):
        """Resize observations to the network input size and rescale pixel
        values from [0, 1] to [-1, 1]."""
        resized_batch = self._resizeToInput(observation_batch)
        # Input should be in the [0,1] range, as this is what torchvision.transforms.ToTensor does
        # We move it to [-1,1]
        normalized = resized_batch*2 - 1
        return normalized
        #return resized_batch

    def postprocess_observations(self, observation_batch : th.Tensor):
        """Inverse of :meth:`preprocess_observations` value scaling:
        map [-1, 1] back to [0, 1]."""
        return (observation_batch + 1)/2
| src/autoencoding_rl/latent_extractors/dyn_autoencoder/DynAutoencoder.py | 9,340 | Compute 'static' features encodingGives a (batch_size, static_encoding_size) outputCompute 'dynamic' features encodingGives a (batch_size, dyn_encoding_size) outputstate_d_1_batch now has size (batch_size, dyn_encoding_size)reward_d_1_batch now has size (batch_size, 1) (still 2-dimensional)Will now use 'static' features vectors and predicted states to predict the observationGives a (batch_size, observations_channels_num, observation_height, observation_width) outputCombine the two vectorsGives a (batch_size, dyn_encoding_size+static_encoding_size) outputPredict the observationGives a (batch_size, observations_channels_num, observation_height, observation_width) outputConcatenate states and actionsGives a (batch_size, dyn_encoding_size+action_size) outputGives a (batch_size, dyn_encoding_size+1) outputnextstate_batch now has size (batch_size, dyn_encoding_size)reward_batch now has size (batch_size, 1) (still 2-dimensional) Input should be in the [0,1] range, as this is what torchvision.transforms.ToTensor does We move it to [-1,1]return resized_batch | 1,065 | en | 0.752529 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 10 18:18:26 2021
@author: Paolo Cozzi <paolo.cozzi@ibba.cnr.it>
"""
import click
import logging
import datetime
from pathlib import Path
from mongoengine.errors import DoesNotExist
from src import __version__
from src.data.common import WORKING_ASSEMBLIES, PLINK_SPECIES_OPT
from src.features.smarterdb import (
global_connection, SmarterInfo)
logger = logging.getLogger(__name__)
@click.command()
def main():
    """Update SMARTER database statuses"""

    logger.info(f"{Path(__file__).name} started")

    # The status document has the fixed id "smarter"; create it on first run.
    try:
        database = SmarterInfo.objects.get(id="smarter")
        logger.debug(f"Found: {database}")

    except DoesNotExist:
        logger.warning("Smarter database status was never tracked")
        database = SmarterInfo(id="smarter")

    # Refresh version, assembly/plink configuration and timestamp.
    database.version = __version__
    database.working_assemblies = WORKING_ASSEMBLIES
    database.plink_specie_opt = PLINK_SPECIES_OPT
    database.last_updated = datetime.datetime.now()

    database.save()
    logger.info("Database status updated")

    logger.info(f"{Path(__file__).name} ended")
if __name__ == '__main__':
    # Configure root logging before doing any work.
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)

    # connect to database
    global_connection()

    main()
| src/data/update_db_status.py | 1,380 | Update SMARTER database statuses
Created on Wed Nov 10 18:18:26 2021
@author: Paolo Cozzi <paolo.cozzi@ibba.cnr.it>
!/usr/bin/env python3 -*- coding: utf-8 -*- update stuff connect to database | 194 | en | 0.534542 |
# encoding: utf-8
"""
The *pathspec* package provides pattern matching for file paths. So far
this only includes Git's wildmatch pattern matching (the style used for
".gitignore" files).
The following classes are imported and made available from the root of
the `pathspec` package:
- :class:`pathspec.pathspec.PathSpec`
- :class:`pathspec.pattern.Pattern`
- :class:`pathspec.pattern.RegexPattern`
- :class:`pathspec.util.RecursionError`
The following functions are also imported:
- :func:`pathspec.util.iter_tree`
- :func:`pathspec.util.lookup_pattern`
- :func:`pathspec.util.match_files`
"""
from __future__ import unicode_literals
from .pathspec import PathSpec
from .pattern import Pattern, RegexPattern
from .util import iter_tree, lookup_pattern, match_files, RecursionError
from ._meta import (
__author__,
__copyright__,
__credits__,
__license__,
__version__,
)
# Load pattern implementations.
from . import patterns
# Expose `GitIgnorePattern` class in the root module for backward
# compatibility with v0.4.
from .patterns.gitwildmatch import GitIgnorePattern
| venv/Lib/site-packages/pathspec/__init__.py | 1,085 | The *pathspec* package provides pattern matching for file paths. So far
this only includes Git's wildmatch pattern matching (the style used for
".gitignore" files).
The following classes are imported and made available from the root of
the `pathspec` package:
- :class:`pathspec.pathspec.PathSpec`
- :class:`pathspec.pattern.Pattern`
- :class:`pathspec.pattern.RegexPattern`
- :class:`pathspec.util.RecursionError`
The following functions are also imported:
- :func:`pathspec.util.iter_tree`
- :func:`pathspec.util.lookup_pattern`
- :func:`pathspec.util.match_files`
encoding: utf-8 Load pattern implementations. Expose `GitIgnorePattern` class in the root module for backward compatibility with v0.4. | 710 | en | 0.595565 |
#!/usr/bin/python3
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import argparse
import os
import sys
from datalab.notebook_lib import *
from fabric import *
# Command-line arguments shared by the clean_* routines below; all default
# to empty strings when not supplied.
parser = argparse.ArgumentParser()
parser.add_argument('--hostname', type=str, default='')
parser.add_argument('--keyfile', type=str, default='')
parser.add_argument('--os_user', type=str, default='')
parser.add_argument('--application', type=str, default='')
args = parser.parse_args()
def general_clean():
    """Remove components common to all notebook types (ungit, the
    inactivity tracker, nodejs/npm) and reset Spark memory settings.

    Exits the process with status 1 on any failure.
    """
    try:
        conn.sudo('systemctl stop ungit')
        conn.sudo('systemctl stop inactive.timer')
        conn.sudo('rm -f /etc/systemd/system/inactive.service')
        conn.sudo('rm -f /etc/systemd/system/inactive.timer')
        conn.sudo('rm -rf /opt/inactivity')
        conn.sudo('npm -g uninstall ungit')
        conn.sudo('rm -f /etc/systemd/system/ungit.service')
        conn.sudo('systemctl daemon-reload')
        remove_os_pkg(['nodejs', 'npm'])
        # Drop any memory tuning previously appended to spark-defaults.conf.
        conn.sudo('sed -i "/spark.*.memory/d" /opt/spark/conf/spark-defaults.conf')
    except Exception as err:
        print('Error: {0}'.format(err))
        sys.exit(1)
def clean_jupyter():
    """Stop the Jupyter notebook service and wipe its packages, shared
    data and per-user state; print the error and exit 1 on failure."""
    user_dirs = (
        '/home/{}/.jupyter/',
        '/home/{}/.ipython/',
        '/home/{}/.ipynb_checkpoints/',
        '/home/{}/.local/share/jupyter/',
    )
    try:
        conn.sudo('systemctl stop jupyter-notebook')
        conn.sudo('pip3 uninstall -y notebook jupyter')
        conn.sudo('rm -rf /usr/local/share/jupyter/')
        for directory in user_dirs:
            conn.sudo('rm -rf ' + directory.format(args.os_user))
        conn.sudo('rm -f /etc/systemd/system/jupyter-notebook.service')
        conn.sudo('systemctl daemon-reload')
    except Exception as err:
        print('Error: {0}'.format(err))
        sys.exit(1)
def clean_jupyterlab():
    """Stop the JupyterLab service and wipe its package and per-user
    state; print the error and exit 1 on failure."""
    user_dirs = (
        '/home/{}/.jupyter/',
        '/home/{}/.ipython/',
        '/home/{}/.ipynb_checkpoints/',
        '/home/{}/.local/share/jupyter/',
    )
    try:
        conn.sudo('systemctl stop jupyterlab-notebook')
        conn.sudo('pip3 uninstall -y jupyterlab')
        # NOTE: removal of /usr/local/share/jupyter/ is intentionally
        # disabled, as in the original implementation.
        for directory in user_dirs:
            conn.sudo('rm -rf ' + directory.format(args.os_user))
        conn.sudo('rm -f /etc/systemd/system/jupyterlab-notebook.service')
        conn.sudo('systemctl daemon-reload')
    except Exception as err:
        print('Error: {0}'.format(err))
        sys.exit(1)
def clean_zeppelin():
    """Stop and remove Zeppelin (and Livy in multi-cluster mode); print
    the error and exit 1 on failure."""
    try:
        conn.sudo('systemctl stop zeppelin-notebook')
        conn.sudo('rm -rf /opt/zeppelin* /var/log/zeppelin /var/run/zeppelin')
        # Livy is only present when multiple clusters per notebook are enabled.
        if os.environ['notebook_multiple_clusters'] == 'true':
            for command in ('systemctl stop livy-server',
                            'rm -rf /opt/livy* /var/run/livy',
                            'rm -f /etc/systemd/system/livy-server.service'):
                conn.sudo(command)
        conn.sudo('rm -f /etc/systemd/system/zeppelin-notebook.service')
        conn.sudo('systemctl daemon-reload')
    except Exception as err:
        print('Error: {0}'.format(err))
        sys.exit(1)
def clean_rstudio():
    """Uninstall RStudio Server and remove per-user R configuration.

    On failure prints the error and exits with status 1, like every
    other clean_* helper in this script.
    """
    try:
        remove_os_pkg(['rstudio-server'])
        conn.sudo('rm -f /home/{}/.Rprofile'.format(args.os_user))
        conn.sudo('rm -f /home/{}/.Renviron'.format(args.os_user))
    except Exception as err:
        # Consistency fix: use the same 'Error: {0}' format as the other
        # clean_* helpers (this one used print('Error:', str(err))).
        print('Error: {0}'.format(err))
        sys.exit(1)
def clean_tensor():
    """Run the Jupyter cleanup, then stop and disable TensorBoard;
    print the error and exit 1 on failure."""
    try:
        clean_jupyter()
        for command in ('systemctl stop tensorboard',
                        'systemctl disable tensorboard',
                        'systemctl daemon-reload'):
            conn.sudo(command)
    except Exception as err:
        print('Error: {0}'.format(err))
        sys.exit(1)
def clean_tensor_rstudio():
    """Run the RStudio cleanup, then stop and disable TensorBoard;
    print the error and exit 1 on failure."""
    try:
        clean_rstudio()
        for command in ('systemctl stop tensorboard',
                        'systemctl disable tensorboard',
                        'systemctl daemon-reload'):
            conn.sudo(command)
    except Exception as err:
        print('Error: {0}'.format(err))
        sys.exit(1)
def clean_tensor_jupyterlab():
    """Run the JupyterLab cleanup, then stop and disable TensorBoard;
    print the error and exit 1 on failure."""
    try:
        clean_jupyterlab()
        for command in ('systemctl stop tensorboard',
                        'systemctl disable tensorboard',
                        'systemctl daemon-reload'):
            conn.sudo(command)
    except Exception as err:
        print('Error: {0}'.format(err))
        sys.exit(1)
def clean_deeplearning():
    """Clean a deeplearning notebook: general cleanup plus Jupyter removal.

    Deduplication fix: the original repeated the entire body of
    general_clean() verbatim; delegate to it instead.  Both helpers
    already print the error and exit 1 on failure, so error behavior
    (message format and exit status) is unchanged.
    """
    try:
        general_clean()
        # TensorBoard shutdown was disabled in the original implementation:
        # conn.sudo('systemctl stop tensorboard')
        # conn.sudo('systemctl disable tensorboard')
        # conn.sudo('systemctl daemon-reload')
        clean_jupyter()
    except Exception as err:
        print('Error: {0}'.format(err))
        sys.exit(1)
if __name__ == "__main__":
    print('Configure connections')
    # NOTE(review): ``global`` at module top level is a no-op -- ``conn``
    # would be module-global here anyway.  Kept as-is.
    global conn
    # Open the remote connection used by every clean_* helper above.
    conn = datalab.fab.init_datalab_connection(args.hostname, args.os_user, args.keyfile)
    if os.environ['conf_cloud_provider'] == 'azure':
        from datalab.actions_lib import ensure_right_mount_paths
        ensure_right_mount_paths()
        # Data-engine master instance name:
        # <base>-<project>-<endpoint>-de-<computational>-m
        de_master_name = '{}-{}-{}-de-{}-m'.format(
            os.environ['conf_service_base_name'],
            os.environ['project_name'],
            os.environ['endpoint_name'],
            os.environ['computational_name'])
        # AzureMeta is presumably provided by the wildcard imports at the
        # top of the file -- TODO confirm.
        de_ami_id = AzureMeta().get_instance_image(os.environ['azure_resource_group_name'],
                                                   de_master_name)
        default_ami_id = 'default'
    else:
        de_master_name = '{}-{}-{}-de-{}-m'.format(
            os.environ['conf_service_base_name'],
            os.environ['project_name'],
            os.environ['endpoint_name'],
            os.environ['computational_name'])
        de_ami_id = get_ami_id_by_instance_name(de_master_name)
        default_ami_id = get_ami_id(
            os.environ['aws_{}_image_name'.format(os.environ['conf_os_family'])])
    # Only clean when the instance was built from a custom data-engine
    # image rather than the stock OS image.
    if de_ami_id != default_ami_id:
        if args.application in os.environ['dataengine_image_notebooks'].split(','):
            if args.application == 'deeplearning':
                clean_deeplearning()
            else:
                # All other flavours share the general cleanup first.
                general_clean()
                if args.application == 'jupyter':
                    clean_jupyter()
                elif args.application == 'zeppelin':
                    clean_zeppelin()
                elif args.application == 'rstudio':
                    clean_rstudio()
                elif args.application == 'tensor':
                    clean_tensor()
                elif args.application == 'tensor-rstudio':
                    clean_tensor_rstudio()
                elif args.application == 'tensor-jupyterlab':
                    clean_tensor_jupyterlab()
    else:
        print('Found default ami, do not make clean')
    #conn.close()
sys.exit(0) | infrastructure-provisioning/src/general/scripts/os/common_clean_instance.py | 8,118 | !/usr/bin/python3 ***************************************************************************** Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ******************************************************************************conn.sudo('rm -rf /usr/local/share/jupyter/') conn.sudo('systemctl stop tensorboard') conn.sudo('systemctl disable tensorboard') conn.sudo('systemctl daemon-reload')conn.close() | 1,104 | en | 0.763016 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 27 17:18:43 2020
@author: admangli
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
# Click records: one row per round, one 0/1 reward column per ad.
dataset = pd.read_csv('Ads_CTR_Optimisation.csv').values
#%%
slot_machines = 10  # number of ads (arms of the bandit)
#%% Baseline: pick an arm uniformly at random each round and total the reward
import random
random_reward = 0
for i in range(len(dataset)):
    random_reward += dataset[i, random.randint(0, slot_machines - 1)]
#%%
number_of_ad_selections = [0]*slot_machines  # N_i: times each ad was shown
reward_sums = [0]*slot_machines              # R_i: accumulated reward per ad
ad_selection_sequence = []                   # which ad was chosen each round
UCB_range = np.zeros((slot_machines, 2)) # To get an idea of the underlying distribution
# Seed phase: select each machine exactly once, in random order (rejection
# sampling -- re-draw while the candidate has already been selected).
# NOTE(review): the loop variable ``round`` shadows the builtin of the
# same name; consider renaming.
for round in range(0, slot_machines):
    target_ad = random.randint(0, slot_machines - 1)
    while (number_of_ad_selections[target_ad] == 1):
        target_ad = random.randint(0, slot_machines - 1)
    number_of_ad_selections[target_ad] += 1
    reward_sums[target_ad] += dataset[round][target_ad]
    ad_selection_sequence.append(target_ad)
# Main UCB loop over the remaining rounds.
for round in range(slot_machines, len(dataset)):
    # Calculate Ri (mean reward) and Delta (confidence half-width) per ad
    Ri = [0]*slot_machines
    Deltai = [0]*slot_machines
    max_UCB = 0
    target_ad = -1
    for ad in range(0, slot_machines):
        Ri[ad] = reward_sums[ad] / number_of_ad_selections[ad]
        Deltai[ad] = math.sqrt(1.5 * math.log(round + 1)/number_of_ad_selections[ad])
        UCB_range[ad, 0] = Ri[ad] + Deltai[ad]  # upper confidence bound
        UCB_range[ad, 1] = Ri[ad] - Deltai[ad]  # lower confidence bound
        if UCB_range[ad, 0] > max_UCB: # Pick the ad with maximum UCB = Ri + Delta for current round
            max_UCB = UCB_range[ad, 0]
            target_ad = ad
    # Increment selected ad's reward and number of selections.
    # (target_ad is always set: every UCB is positive, so one beats 0.)
    if target_ad != -1:
        number_of_ad_selections[target_ad] += 1
        reward_sums[target_ad] += dataset[round][target_ad]
        ad_selection_sequence.append(target_ad)
#%% Visualize results
# Plot a histogram showing how many times each ad was selected
plt.hist(ad_selection_sequence)
plt.xlabel('Ad Number')
plt.ylabel('Number of selections')
plt.title('Ad selection comparision')
plt.show() | Machine Learning/Sklearn Implementations/Reinforcement Learning/Upper_Confidence_Bound.py | 2,201 | Created on Fri Mar 27 17:18:43 2020
@author: admangli
!/usr/bin/env python3 -*- coding: utf-8 -*-%%%% Random ad selection reward%% To get an idea of underlying distributino Generate initial seed, selecting each machine at least once randomly Calculate Ri and Delta for each ad for the current round Pick the ad with maximum UCB = Ri + Delta for current round Increment selected ad's reward and number of selections%% Visualize results Plot a histogram showing how many times each ad was selected | 497 | en | 0.854125 |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .base import Loss
class AdaCos(Loss):
    """PyTorch implementation of AdaCos. See Ref[1] for paper
    This implementation is different from the most open-source implementations in following ways:
    1) expects raw logits of size (bs x num_classes) not (bs, embedding_size)
    2) despite AdaCos being dynamic, still add an optional margin parameter
    3) calculate running average stats of B and θ, not batch-wise stats as in original paper
    4) normalize input logits, not embeddings and weights
    Args:
        embedding_size (int): dimensionality of the input embeddings
        num_classes (int): number of target classes (rows of the weight matrix)
        final_criterion: loss applied to the scaled cosine logits
        margin (float): margin in radians
        momentum (float): momentum for running average of B and θ
    Input:
        y_pred (torch.Tensor): shape BS x N_classes
        y_true (torch.Tensor): one-hot encoded. shape BS x N_classes
    Reference:
        [1] Adaptively Scaling Cosine Logits for Effectively Learning Deep Face Representations
    """

    def __init__(self, embedding_size, num_classes, final_criterion, margin=0, momentum=0.95):
        super(AdaCos, self).__init__()
        self.final_criterion = final_criterion
        # NOTE(review): margin is stored but the line that would apply it in
        # forward() is commented out, so it currently has no effect.
        self.margin = margin
        self.momentum = momentum
        self.prev_s = 10  # running estimate of the adaptive scale S
        self.running_B = 1000  # default value is chosen so that initial S is ~10
        self.running_theta = math.pi / 4  # running average angle of the true class
        self.eps = 1e-7  # clamp margin keeping acos numerically stable
        # Class-prototype weight matrix, one row per class.
        self.register_parameter("weight", torch.nn.Parameter(torch.zeros(num_classes, embedding_size)))
        nn.init.xavier_uniform_(self.weight)
        self.idx = 0  # forward-call counter, used only for periodic logging

    def forward(self, embedding, y_true):
        """Scale the cosine logits adaptively and apply final_criterion.

        y_true may be 1d class indices or a 2d one-hot/soft label tensor.
        """
        # Cosine similarity between normalized embeddings and class weights.
        cos_theta = F.linear(F.normalize(embedding), F.normalize(self.weight)).clamp(-1 + self.eps, 1 - self.eps)
        # cos_theta = torch.cos(torch.acos(cos_theta + self.margin))
        if y_true.dim() != 1:
            y_true_one_hot = y_true.float()
        else:
            y_true_one_hot = torch.zeros_like(cos_theta)
            y_true_one_hot.scatter_(1, y_true.unsqueeze(1), 1.0)
        # Statistics are updated without gradients.
        with torch.no_grad():
            # B: batch average of sum(exp(S * cos)) over the *wrong* classes.
            B_batch = cos_theta[y_true_one_hot.eq(0)].mul(self.prev_s).exp().sum().div(embedding.size(0))
            self.running_B = self.running_B * self.momentum + B_batch * (1 - self.momentum)
            theta = torch.acos(cos_theta.clamp(-1 + self.eps, 1 - self.eps))
            # originally authors use median, but I use mean
            theta_batch = theta[y_true_one_hot.ne(0)].mean().clamp_max(math.pi / 4)
            self.running_theta = self.running_theta * self.momentum + theta_batch * (1 - self.momentum)
            # Adaptive scale: S = log(B) / cos(theta).
            self.prev_s = self.running_B.log() / torch.cos(self.running_theta)
        self.idx += 1
        if self.idx % 1000 == 0:
            print(
                f"\nRunning B: {self.running_B:.2f}. Running theta: {self.running_theta:.2f}. Running S: {self.prev_s:.2f}"
            )
        return self.final_criterion(cos_theta * self.prev_s, y_true_one_hot)
| pytorch_tools/losses/angular.py | 2,914 | PyTorch implementation of AdaCos. See Ref[1] for paper
This implementation is different from the most open-source implementations in following ways:
1) expects raw logits of size (bs x num_classes) not (bs, embedding_size)
2) despite AdaCos being dynamic, still add an optional margin parameter
3) calculate running average stats of B and θ, not batch-wise stats as in original paper
4) normalize input logits, not embeddings and weights
Args:
margin (float): margin in radians
momentum (float): momentum for running average of B and θ
Input:
y_pred (torch.Tensor): shape BS x N_classes
y_true (torch.Tensor): one-hot encoded. shape BS x N_classes
Reference:
[1] Adaptively Scaling Cosine Logits for Effectively Learning Deep Face Representations
default value is chosen so that initial S is ~10 cos_theta = torch.cos(torch.acos(cos_theta + self.margin)) originally authors use median, but I use mean | 925 | en | 0.709046 |
"""
IGN Instituto Geográfico Nacional Sismología Feed.
Fetches GeoRSS feed from IGN Instituto Geográfico Nacional Sismología.
"""
from datetime import datetime
from typing import Optional
import dateparser as dateparser
from georss_client import FeedEntry, GeoRssFeed
from georss_client.consts import CUSTOM_ATTRIBUTE
from georss_client.feed_manager import FeedManagerBase
# Attribution text for the feed publisher.
ATTRIBUTION = "Instituto Geográfico Nacional"

# Detail image for an event, keyed by the short event id.
IMAGE_URL_PATTERN = (
    "http://www.ign.es/web/resources/sismologia/www/"
    "dir_images_terremotos/detalle/{}.gif"
)

# The regexes below capture into the shared CUSTOM_ATTRIBUTE group name so
# the FeedEntry _search_in_* helpers can extract the value.
# Magnitude value following the word "magnitud" in the description.
REGEXP_ATTR_MAGNITUDE = r"magnitud (?P<{}>[^ ]+) ".format(CUSTOM_ATTRIBUTE)
# Upper-case region name between "en ... en" after the magnitude.
REGEXP_ATTR_REGION = r"magnitud [^ ]+ en (?P<{}>[A-ZÁÉÓÜÑ0-9 \-\.]+) en".format(
    CUSTOM_ATTRIBUTE
)
# Published date following the "-Info.terremoto:" prefix in the title.
REGEXP_ATTR_PUBLISHED_DATE = r"-Info.terremoto: (?P<{}>.+)$".format(CUSTOM_ATTRIBUTE)
# Short event id at the end of the entry's external detail URL.
REGEXP_ATTR_SHORT_ID = (
    r"http:\/\/www\.ign\.es\/web\/ign\/portal\/"
    r"sis-catalogo-terremotos\/-\/catalogo-terremotos\/"
    r"detailTerremoto\?evid=(?P<{}>\w+)$".format(CUSTOM_ATTRIBUTE)
)
# Upstream GeoRSS feed location.
URL = "http://www.ign.es/ign/RssTools/sismologia.xml"
class IgnSismologiaFeedManager(FeedManagerBase):
    """Feed Manager for the IGN Sismología GeoRSS feed."""

    def __init__(
        self,
        generate_callback,
        update_callback,
        remove_callback,
        coordinates,
        filter_radius=None,
        filter_minimum_magnitude=None,
    ):
        """Initialize the IGN Sismología Feed Manager."""
        # Build the concrete feed and hand it straight to the base manager.
        super().__init__(
            IgnSismologiaFeed(
                coordinates,
                filter_radius=filter_radius,
                filter_minimum_magnitude=filter_minimum_magnitude,
            ),
            generate_callback,
            update_callback,
            remove_callback,
        )
class IgnSismologiaFeed(GeoRssFeed):
    """IGN Sismología feed."""

    def __init__(
        self, home_coordinates, filter_radius=None, filter_minimum_magnitude=None
    ):
        """Initialise this service."""
        super().__init__(home_coordinates, URL, filter_radius=filter_radius)
        self._filter_minimum_magnitude = filter_minimum_magnitude

    def __repr__(self):
        """Return string representation of this feed."""
        template = "<{}(home={}, url={}, radius={}, magnitude={})>"
        return template.format(
            self.__class__.__name__,
            self._home_coordinates,
            self._url,
            self._filter_radius,
            self._filter_minimum_magnitude,
        )

    def _new_entry(self, home_coordinates, rss_entry, global_data):
        """Generate a new entry."""
        return IgnSismologiaFeedEntry(home_coordinates, rss_entry)

    def _filter_entries(self, entries):
        """Filter the provided entries."""
        candidates = super()._filter_entries(entries)
        if not self._filter_minimum_magnitude:
            return candidates
        # Keep only entries that carry a magnitude at or above the threshold.
        return [
            entry
            for entry in candidates
            if entry.magnitude and entry.magnitude >= self._filter_minimum_magnitude
        ]
class IgnSismologiaFeedEntry(FeedEntry):
    """IGN Sismología feed entry."""

    def __init__(self, home_coordinates, rss_entry):
        """Initialise this service."""
        super().__init__(home_coordinates, rss_entry)

    @property
    def attribution(self) -> str:
        """Return the attribution of this entry."""
        return ATTRIBUTION

    @property
    def published(self) -> Optional[datetime]:
        """Return the published date of this entry, parsed from the title."""
        published_date = self._search_in_title(REGEXP_ATTR_PUBLISHED_DATE)
        if published_date:
            published_date = dateparser.parse(published_date)
        return published_date

    @property
    def magnitude(self) -> Optional[float]:
        """Return the magnitude of this entry, parsed from the description."""
        magnitude = self._search_in_description(REGEXP_ATTR_MAGNITUDE)
        if magnitude:
            magnitude = float(magnitude)
        return magnitude

    @property
    def region(self) -> Optional[str]:
        """Return the region name of this entry.

        Annotation fix: the regex search yields a string (or None),
        not a float as previously declared.
        """
        return self._search_in_description(REGEXP_ATTR_REGION)

    def _short_id(self) -> Optional[str]:
        """Return the short id of this entry, taken from the external id."""
        return self._search_in_external_id(REGEXP_ATTR_SHORT_ID)

    @property
    def image_url(self) -> Optional[str]:
        """Return the image url of this entry, or None without a short id."""
        short_id = self._short_id()
        if short_id:
            return IMAGE_URL_PATTERN.format(short_id)
        return None
| georss_ign_sismologia_client/__init__.py | 4,639 | IGN Sismología feed.
IGN Sismología feed entry.
Feed Manager for IGN Sismología feed.
Initialize the IGN Sismología Feed Manager.
Initialise this service.
Initialise this service.
Return string representation of this feed.
Filter the provided entries.
Generate a new entry.
Return the short id of this entry.
Return the attribution of this entry.
Return the image url of this entry.
Return the magnitude of this entry.
Return the published date of this entry.
Return the region of this entry.
IGN Instituto Geográfico Nacional Sismología Feed.
Fetches GeoRSS feed from IGN Instituto Geográfico Nacional Sismología.
Return only entries that have an actual magnitude value, and the value is equal or above the defined threshold. | 729 | en | 0.68694 |
''' convenience functions for ANOVA type analysis with OLS
Note: statistical results of ANOVA are not checked, OLS is
checked but not whether the reported results are the ones used
in ANOVA
includes form2design for creating dummy variables
TODO:
* ...
*
'''
import numpy as np
#from scipy import stats
import statsmodels.api as sm
def data2dummy(x, returnall=False):
    """Convert a 1d array of categories into 0/1 dummy columns.

    The input is flattened with ravel.  One column is produced per unique
    (sorted) category; unless ``returnall`` is true, the column for the
    last category is dropped.
    """
    flat = x.ravel()
    indicators = (flat[:, None] == np.unique(flat)).astype(int)
    return indicators if returnall else indicators[:, :-1]
def data2proddummy(x):
    """Create interaction (product) dummy variables from a 2-column 2d array.

    One dummy column is produced per unique row (category combination),
    in lexicographic order; the last column is dropped.  The result is
    singular together with the simple dummy variables of both factors,
    but not with a constant alone.

    Python 3 fix: the original used ``np.unique(map(tuple, x.tolist()))``,
    which breaks because ``map`` returns an iterator and ``np.unique``
    flattens 2d input; ``axis=0`` computes unique rows directly.
    """
    groups = np.unique(x, axis=0)
    # groups[:, None, :] broadcasts against x to (groups, rows, cols);
    # all(-1) matches full rows, then transpose to (rows, groups).
    return (x == groups[:, None, :]).all(-1).T.astype(int)[:, :-1]
def data2groupcont(x1, x2):
    """Create a group-specific continuous variable.

    Parameters
    ----------
    x1 : 1d array
        label or group array
    x2 : 1d array (float)
        continuous variable

    Notes
    -----
    Returns the full dummy matrix of ``x1`` multiplied column-wise by
    ``x2`` -- useful for group specific slope coefficients in regression.
    """
    cont = x2[:, None] if x2.ndim == 1 else x2
    return data2dummy(x1, returnall=True) * cont
# Result string templates, filled via "%" with the dict from anovadict().
# anova_str0 excludes the constant from the model sum of squares;
# anova_str includes it (the "ssmwithmean" key).  The original notes:
# the variant with the constant does not match NIST regression output,
# res.ess was observed negative in some examples (unresolved), and the
# key mapping has not been checked for completeness.
anova_str0 = '''
ANOVA statistics (model sum of squares excludes constant)
Source DF Sum Squares Mean Square F Value Pr > F
Model %(df_model)i %(ess)f %(mse_model)f %(fvalue)f %(f_pvalue)f
Error %(df_resid)i %(ssr)f %(mse_resid)f
CTotal %(nobs)i %(uncentered_tss)f %(mse_total)f
R squared %(rsquared)f
'''
anova_str = '''
ANOVA statistics (model sum of squares includes constant)
Source DF Sum Squares Mean Square F Value Pr > F
Model %(df_model)i %(ssmwithmean)f %(mse_model)f %(fvalue)f %(f_pvalue)f
Error %(df_resid)i %(ssr)f %(mse_resid)f
CTotal %(nobs)i %(uncentered_tss)f %(mse_total)f
R squared %(rsquared)f
'''
def anovadict(res):
    """Update a regression results dictionary with ANOVA statistics.

    Copies ``res.__dict__`` plus the ANOVA-relevant (possibly cached)
    attributes into a plain dict and adds ``nobs`` and the model sum of
    squares including the mean (``ssmwithmean``).  Not checked for
    completeness.
    """
    anova_attr = ['df_model', 'df_resid', 'ess', 'ssr', 'uncentered_tss',
                  'mse_model', 'mse_resid', 'mse_total', 'fvalue', 'f_pvalue',
                  'rsquared']
    # plain copy first; cached attributes are not in __dict__, so fetch
    # them explicitly with getattr
    ad = dict(res.__dict__)
    ad.update({key: getattr(res, key) for key in anova_attr})
    ad['nobs'] = res.model.nobs
    ad['ssmwithmean'] = res.uncentered_tss - res.ssr
    return ad
def form2design(ss, data):
    '''convert string formula to data dictionary

    ss : string
     * I : add constant
     * varname : for simple varnames data is used as is
     * F:varname : create dummy variables for factor varname
     * P:varname1*varname2 : create product dummy variables for
       varnames
     * G:varname1*varname2 : create product between factor and
       continuous variable
    data : dict or structured array
       data set, access of variables by name as in dictionaries

    Returns
    -------
    vars : dictionary
        dictionary of variables with converted dummy variables
    names : list
        list of names, product (P:) and grouped continuous
        variables (G:) have name by joining individual names
        sorted according to input

    Examples
    --------
    >>> xx, n = form2design('I a F:b P:c*d G:c*f', testdata)
    >>> sorted(xx.keys())
    ['a', 'b', 'cd', 'cf', 'const']
    >>> n
    ['const', 'a', 'b', 'cd', 'cf']

    Notes
    -----
    with sorted dict, separate name list wouldn't be necessary
    '''
    # Fixes: local "vars" renamed (shadowed the builtin), membership test
    # uses the "not in" idiom, and the joined name is computed once.
    design = {}
    names = []
    for item in ss.split():
        if item == 'I':
            design['const'] = np.ones(data.shape[0])
            names.append('const')
        elif ':' not in item:
            design[item] = data[item]
            names.append(item)
        elif item[:2] == 'F:':
            v = item.split(':')[1]
            design[v] = data2dummy(data[v])
            names.append(v)
        elif item[:2] == 'P:':
            v = item.split(':')[1].split('*')
            name = ''.join(v)
            design[name] = data2proddummy(np.c_[data[v[0]], data[v[1]]])
            names.append(name)
        elif item[:2] == 'G:':
            v = item.split(':')[1].split('*')
            name = ''.join(v)
            design[name] = data2groupcont(data[v[0]], data[v[1]])
            names.append(name)
        else:
            raise ValueError('unknown expression in formula')
    return design, names
def dropname(ss, li):
    """Return a copy of list ``li`` without the names given in the
    space-delimited string ``ss``; the original list is not modified."""
    kept = list(li)
    for name in ss.split():
        kept.remove(name)
    return kept
if __name__ == '__main__':

    # Modernized for Python 3: all ``print`` statements converted to the
    # print() function and ``np.genfromtxt(missing=...)`` updated to the
    # current ``missing_values`` keyword.  Logic otherwise unchanged.

    # Test Example with created data
    # ------------------------------
    nobs = 1000
    testdataint = np.random.randint(3, size=(nobs, 4)).view([('a', int), ('b', int), ('c', int), ('d', int)])
    testdatacont = np.random.normal(size=(nobs, 2)).view([('e', float), ('f', float)])
    import numpy.lib.recfunctions
    dt2 = numpy.lib.recfunctions.zip_descr((testdataint, testdatacont), flatten=True)
    # concatenate structured arrays
    testdata = np.empty((nobs, 1), dt2)
    for name in testdataint.dtype.names:
        testdata[name] = testdataint[name]
    for name in testdatacont.dtype.names:
        testdata[name] = testdatacont[name]

    #print(form2design('a', testdata))

    if 0:  # print only when nobs is small, e.g. nobs=10
        xx, n = form2design('F:a', testdata)
        print(xx)
        print(form2design('P:a*b', testdata))
        print(data2proddummy(np.c_[testdata['a'], testdata['b']]))

    xx, names = form2design('a F:b P:c*d', testdata)
    #xx, names = form2design('I a F:b F:c F:d P:c*d', testdata)
    xx, names = form2design('I a F:b P:c*d', testdata)
    xx, names = form2design('I a F:b P:c*d G:a*e f', testdata)

    X = np.column_stack([xx[nn] for nn in names])
    # simple test version: all coefficients equal to one
    y = X.sum(1) + 0.01 * np.random.normal(size=(nobs))
    rest1 = sm.OLS(y, X).fit()  # results
    print(rest1.params)
    print(anova_str % anovadict(rest1))

    X = np.column_stack([xx[nn] for nn in dropname('ae f', names)])
    # simple test version: all coefficients equal to one
    y = X.sum(1) + 0.01 * np.random.normal(size=(nobs))
    rest1 = sm.OLS(y, X).fit()
    print(rest1.params)
    print(anova_str % anovadict(rest1))

    # Example: from Bruce
    # -------------------

    # get data and clean it
    # ^^^^^^^^^^^^^^^^^^^^^
    # requires file 'dftest3.data' posted by Bruce
    # read data set and drop rows with missing data
    dt_b = np.dtype([('breed', int), ('sex', int), ('litter', int),
                     ('pen', int), ('pig', int), ('age', float),
                     ('bage', float), ('y', float)])
    dta = np.genfromtxt('dftest3.data', dt_b, missing_values='.', usemask=True)
    print('missing', [dta.mask[k].sum() for k in dta.dtype.names])

    m = dta.mask.view(bool)
    droprows = m.reshape(-1, len(dta.dtype.names)).any(1)
    # get complete data as plain structured array
    # maybe doesn't work with masked arrays
    dta_use_b1 = dta[~droprows, :].data
    print(dta_use_b1.shape)
    print(dta_use_b1.dtype)

    # Example b1: variables from Bruce's glm
    # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    # prepare data and dummy variables
    xx_b1, names_b1 = form2design('I F:sex age', dta_use_b1)
    # create design matrix
    X_b1 = np.column_stack([xx_b1[nn] for nn in dropname('', names_b1)])
    y_b1 = dta_use_b1['y']
    # estimate using OLS
    rest_b1 = sm.OLS(y_b1, X_b1).fit()
    # print results
    print(rest_b1.params)
    print(anova_str % anovadict(rest_b1))
    # compare with original version only in original version
    # print(anova_str % anovadict(res_b0))

    # Example: use all variables except pig identifier
    # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    allexog = ' '.join(dta.dtype.names[:-1])
    # 'breed sex litter pen pig age bage'
    xx_b1a, names_b1a = form2design('I F:breed F:sex F:litter F:pen age bage', dta_use_b1)
    X_b1a = np.column_stack([xx_b1a[nn] for nn in dropname('', names_b1a)])
    y_b1a = dta_use_b1['y']
    rest_b1a = sm.OLS(y_b1a, X_b1a).fit()
    print(rest_b1a.params)
    print(anova_str % anovadict(rest_b1a))

    for dropn in names_b1a:
        print('\nResults dropping', dropn)
        X_b1a_ = np.column_stack([xx_b1a[nn] for nn in dropname(dropn, names_b1a)])
        y_b1a_ = dta_use_b1['y']
        rest_b1a_ = sm.OLS(y_b1a_, X_b1a_).fit()
        # print(rest_b1a_.params)
        print(anova_str % anovadict(rest_b1a_))
| statsmodels/sandbox/regression/try_ols_anova.py | 9,151 | from scipy import statsbrute force, assumes x is 2dreplace with encoding if possibleincludes singularity with additive factors Result stringsthe second leaves the constant in, not with NIST regressionbut something fishy with res.ess negative in examples ?not checked if these are all the right onesdict doesn't work with cached attributes Test Example with created data ------------------------------ concatenate structured arraysprint form2design('a',testdata) print only when nobs is small, e.g. nobs=10xx, names = form2design('I a F:b F:c F:d P:c*d',testdata) simple test version: all coefficients equal to oneresults simple test version: all coefficients equal to one Example: from Bruce -------------------get data and clean it^^^^^^^^^^^^^^^^^^^^^ requires file 'dftest3.data' posted by Bruce read data set and drop rows with missing data get complete data as plain structured array maybe doesn't work with masked arraysExample b1: variables from Bruce's glm^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ prepare data and dummy variables create design matrix estimate using OLS print resultscompare with original version only in original versionprint anova_str % anovadict(res_b0) Example: use all variables except pig identifier^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^'breed sex litter pen pig age bage'print rest_b1a_.params | 1,333 | en | 0.589562 |
# Copyright 2020 Konstruktor, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from tethys.apps.base import AppBase
@click.group()
def cli():
    """
    The tethys CLI for managing your environment.
    """
    # Root click group; subcommand groups (e.g. "apps") attach to it below.
@cli.group(name="apps")
def apps_entry():
    """
    Tethys apps manager
    """


# Let every registered app contribute its own click commands to the
# "apps" group, if it provides an add_click_entry hook.
for app_cls in AppBase.get_apps():
    hook = getattr(app_cls, "add_click_entry", None)
    if hook:
        hook(apps_entry)
| tethys/bin/cli.py | 994 | Tethys apps manager
The tethys CLI for managing your environment.
Copyright 2020 Konstruktor, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 643 | en | 0.852221 |
"""
This code was taken from https://github.com/ActiveState/appdirs and modified
to suite our purposes.
"""
import os
import sys
from pip._vendor import six
def user_cache_dir(appname):
    r"""
    Return full path to the user-specific cache dir for this application.

    "appname" is the name of application.

    Typical user cache directories are:
        Mac OS X:   ~/Library/Caches/<AppName>
        Unix:       ~/.cache/<AppName> (XDG default)
        Windows:    C:\Users\<username>\AppData\Local\<AppName>\Cache

    On Windows the MSDN docs only suggest that local settings go in the
    `CSIDL_LOCAL_APPDATA` directory (identical to the non-roaming app data
    dir returned by `user_data_dir`); apps typically put cache data
    somewhere *under* that dir, so this function appends "Cache".
    """
    if sys.platform == "win32":
        # Non-roaming app data dir plus a "Cache" leaf.
        base = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
        return os.path.join(base, appname, "Cache")
    if sys.platform == "darwin":
        return os.path.join(os.path.expanduser("~/Library/Caches"), appname)
    # Other Unix: honor XDG_CACHE_HOME, defaulting to ~/.cache.
    xdg_base = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
    return os.path.join(xdg_base, appname)
# -- Windows support functions --
def _get_win_folder_from_registry(csidl_name):
    """
    This is a fallback technique at best. I'm not sure if using the
    registry for this guarantees us the correct answer for all CSIDL_*
    names.

    Python 3 fix: the registry module was renamed from ``_winreg`` to
    ``winreg``; import whichever is available so this works on both
    interpreters (the file still supports 2 and 3 via ``six``).
    """
    try:
        import winreg as _winreg  # Python 3
    except ImportError:
        import _winreg  # Python 2

    shell_folder_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }[csidl_name]

    key = _winreg.OpenKey(
        _winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
    )
    directory, _type = _winreg.QueryValueEx(key, shell_folder_name)
    return directory
def _get_win_folder_with_pywin32(csidl_name):
    """Resolve a CSIDL_* special folder via the pywin32 shell API."""
    from win32com.shell import shellcon, shell
    directory = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
    # Try to make this a unicode path because SHGetFolderPath does
    # not return unicode strings when there is unicode data in the
    # path.
    try:
        directory = six.text_type(directory)

        # Downgrade to short path name if have highbit chars. See
        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
        has_high_char = False
        for c in directory:
            if ord(c) > 255:
                has_high_char = True
                break
        if has_high_char:
            try:
                import win32api
                directory = win32api.GetShortPathName(directory)
            except ImportError:
                # win32api unavailable: keep the long (possibly unicode) path.
                pass
    except UnicodeError:
        # Conversion failed: return whatever SHGetFolderPath produced.
        pass
    return directory
def _get_win_folder_with_ctypes(csidl_name):
    """Resolve a CSIDL_* special folder via ctypes' SHGetFolderPathW."""
    csidl_const = {
        "CSIDL_APPDATA": 26,
        "CSIDL_COMMON_APPDATA": 35,
        "CSIDL_LOCAL_APPDATA": 28,
    }[csidl_name]

    buf = ctypes.create_unicode_buffer(1024)
    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)

    # Downgrade to short path name if have highbit chars. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    if any(ord(c) > 255 for c in buf):
        buf2 = ctypes.create_unicode_buffer(1024)
        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
            buf = buf2

    return buf.value
if sys.platform == "win32":
    # Pick the best available helper for resolving Windows special folders:
    # pywin32 if installed, then ctypes, then the registry fallback (which
    # is least reliable for some CSIDL_* names).
    try:
        import win32com.shell  # noqa
        _get_win_folder = _get_win_folder_with_pywin32
    except ImportError:
        try:
            import ctypes
            _get_win_folder = _get_win_folder_with_ctypes
        except ImportError:
            _get_win_folder = _get_win_folder_from_registry
| pip/appdirs.py | 4,328 | This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Windows: C:\Users\<username>\AppData\Local\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go
in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the
non-roaming app data dir (the default returned by `user_data_dir`). Apps
typically put cache data somewhere *under* the given dir here. Some
examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
This code was taken from https://github.com/ActiveState/appdirs and modified
to suite our purposes.
Get the base path Add our app name and Cache directory to it Get the base path Add our app name to it Get the base path Add our app name to it -- Windows support functions -- Try to make this a unicode path because SHGetFolderPath does not return unicode strings when there is unicode data in the path. Downgrade to short path name if have highbit chars. See <http://bugs.activestate.com/show_bug.cgi?id=85099>. Downgrade to short path name if have highbit chars. See <http://bugs.activestate.com/show_bug.cgi?id=85099>. noqa | 1,527 | en | 0.728566 |
# https://github.com/theeko74/pdfc
# modified by brio50 on 2022/01/23, working with gs version 9.54.0
"""
Simple python wrapper script that uses Ghostscript to compress PDF files.
Compression levels:
0: default
1: prepress
2: printer
3: ebook
4: screen
Dependency: Ghostscript.
On MacOSX install via command line `brew install ghostscript`.
"""
import argparse
import subprocess
import os.path
import sys
import shutil
def compress(input_file_path, output_file_path, level=0, method=1):
    """Compress a PDF via the Ghostscript command line interface.

    Args:
        input_file_path: Path of the source PDF (must exist, .pdf extension).
        output_file_path: Path where the compressed PDF is written.
        level: Compression level 0-4 (0 /default, 1 /prepress, 2 /printer,
            3 /ebook, 4 /screen).
        method: 1 = aggressive RGB-conversion/resampling command line,
            2 = plain -dPDFSETTINGS run (used as fallback when method 1
            makes the file larger).

    Returns:
        [initial_size, final_size, ratio]: sizes in MB and the compression
        ratio in percent.
    """
    quality = {
        0: '/default',
        1: '/prepress',
        2: '/printer',
        3: '/ebook',
        4: '/screen'
    }
    # Validate arguments up front with clear messages (previously an
    # unknown level raised a bare KeyError).
    if level not in quality:
        print(f"Error: invalid compression level (expected 0-4): {level}")
        sys.exit(1)
    # Check if valid path
    if not os.path.isfile(input_file_path):
        print(f"Error: invalid path for input file: {input_file_path}")
        sys.exit(1)
    # Check if file is a PDF by extension (not bulletproof, but catches
    # obvious mistakes)
    if input_file_path.split('.')[-1].lower() != 'pdf':
        print(f"Error: input file is not a PDF: {input_file_path}")
        sys.exit(1)
    gs = get_ghostscript_path()
    file_name = input_file_path.split('/')[-1]  # everything after last '/'
    print("Compressing PDF \"{}\"...".format(file_name))
    if method == 1:
        # https://gist.github.com/lkraider/f0888da30bc352f9d167dfa4f4fc8213
        cmd = [gs, '-sDEVICE=pdfwrite',
               '-dNumRenderingThreads=2',
               '-dPDFSETTINGS={}'.format(quality[level]),
               '-dCompatibilityLevel=1.5',
               '-dNOPAUSE', '-dQUIET', '-dBATCH', '-dSAFER',
               # font settings
               '-dSubsetFonts=true',
               '-dCompressFonts=true',
               '-dEmbedAllFonts=true',
               # color format
               '-sProcessColorModel=DeviceRGB',
               '-sColorConversionStrategy=RGB',
               '-sColorConversionStrategyForImages=RGB',
               '-dConvertCMYKImagesToRGB=true',
               # image resample
               '-dDetectDuplicateImages=true',
               '-dColorImageDownsampleType=/Bicubic',
               '-dColorImageResolution=300',
               '-dGrayImageDownsampleType=/Bicubic',
               '-dGrayImageResolution=300',
               '-dMonoImageDownsampleType=/Subsample',
               '-dMonoImageResolution=300',
               '-dDownsampleColorImages=true',
               # preset overrides
               '-dDoThumbnails=false',
               '-dCreateJobTicket=false',
               '-dPreserveEPSInfo=false',
               '-dPreserveOPIComments=false',
               '-dPreserveOverprintSettings=false',
               '-dUCRandBGInfo=/Remove',
               '-sOutputFile={}'.format(output_file_path),
               input_file_path]
    elif method == 2:
        cmd = [gs, '-sDEVICE=pdfwrite',
               '-dNumRenderingThreads=2',
               '-dPDFSETTINGS={}'.format(quality[level]),
               '-dCompatibilityLevel=1.4',
               '-dNOPAUSE', '-dQUIET', '-dBATCH', '-dSAFER',
               '-dDetectDuplicateImages=true',
               '-sOutputFile={}'.format(output_file_path),
               input_file_path]
    else:
        # Previously an unknown method left ``cmd`` unbound and crashed
        # with a confusing UnboundLocalError below.
        raise ValueError(f"Unknown compression method: {method}")
    try:
        # execute
        subprocess.call(cmd, stderr=sys.stdout)
    except Exception:
        # Narrowed from a bare ``except:`` (which also swallowed
        # KeyboardInterrupt/SystemExit). Print the command for debugging
        # and fall through to the existence check below.
        print(" ".join(cmd))
    if not os.path.exists(output_file_path):
        raise Exception(f"Ghostscript failed to create {output_file_path}, time to debug...\n",
                        " ".join(cmd))
    raw_initial = os.path.getsize(input_file_path)
    raw_final = os.path.getsize(output_file_path)
    initial_size = round(raw_initial / (1024 * 1024), 2)
    final_size = round(raw_final / (1024 * 1024), 2)
    # Compute the ratio from raw byte counts: a sub-10KB input rounds to
    # 0.0 MB and previously caused a ZeroDivisionError.
    ratio = round(100 - (raw_final / raw_initial) * 100, 1) if raw_initial else 0.0
    print(f"Initial file size is {initial_size}MB",
          f"; Final file size is {final_size}MB",
          f"; Compression Ratio = {ratio}%\n")
    if raw_final > raw_initial and method == 1:
        # Method 1 sometimes inflates already-optimized PDFs; retry with
        # the plain preset at maximum compression.
        print('-' * 100)
        print('Compression Failed\nTrying another ghostscript compression method...')
        print('-' * 100)
        return compress(input_file_path, output_file_path, 4, 2)
    return [initial_size, final_size, ratio]
def get_ghostscript_path():
    """Return the path of the first Ghostscript executable found on PATH.

    Checks the common executable names for Unix and 32/64-bit Windows.

    Raises:
        FileNotFoundError: if no Ghostscript executable is on PATH.
    """
    gs_names = ('gs', 'gswin32', 'gswin64')
    for name in gs_names:
        # Call which() once per name (previously it was called twice:
        # once for the test and once for the return value).
        path = shutil.which(name)
        if path:
            return path
    raise FileNotFoundError(f'No GhostScript executable was found on path ({"/".join(gs_names)})')
def main():
    """CLI entry point: parse arguments, compress, and post-process files."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument('input', help='Relative or absolute path of the input PDF file')
    parser.add_argument('-o', '--out', help='Relative or absolute path of the output PDF file')
    parser.add_argument('-c', '--compress', type=int, help='Compression level from 0 to 4')
    parser.add_argument('-b', '--backup', action='store_true', help="Backup the old PDF file")
    parser.add_argument('--open', action='store_true', default=False,
                        help='Open PDF after compression')
    args = parser.parse_args()
    # Default to level 2 ('/printer'). Compare against None so an explicit
    # "-c 0" (ghostscript '/default') is not silently overridden.
    if args.compress is None:
        args.compress = 2
    # In case no output file is specified, store in temp file
    if not args.out:
        args.out = 'temp.pdf'
    # Run. BUG FIX: this previously passed ``power=args.compress``, which is
    # not a parameter of compress() and raised TypeError; it is ``level``.
    compress(args.input, args.out, level=args.compress)
    # In case no output file is specified, overwrite the original in place
    if args.out == 'temp.pdf':
        if args.backup:
            shutil.copyfile(args.input, args.input.replace(".pdf", "_BACKUP.pdf"))
        shutil.copyfile(args.out, args.input)
        os.remove(args.out)
    # In case we want to open the file after compression (macOS ``open``)
    if args.open:
        if args.out == 'temp.pdf' and args.backup:
            subprocess.call(['open', args.input])
        else:
            subprocess.call(['open', args.out])
if __name__ == '__main__':
    main()
| gs_compress.py | 6,123 | Function to compress PDF via Ghostscript command line interface
Simple python wrapper script that uses Ghostscript to compress PDF files.
Compression levels:
0: default
1: prepress
2: printer
3: ebook
4: screen
Dependency: Ghostscript.
On MacOSX install via command line `brew install ghostscript`.
https://github.com/theeko74/pdfc modified by brio50 on 2022/01/23, working with gs version 9.54.0 Check if valid path Check if file is a PDF by extension not sure this is the most robust solution everything after last '/' https://gist.github.com/lkraider/f0888da30bc352f9d167dfa4f4fc8213 font settings color format` image resample preset overrides execute print ghostscript command for debug In case no compression level is specified, default is 2 '/ printer' In case no output file is specified, store in temp file Run In case no output file is specified, erase original file In case we want to open the file after compression | 954 | en | 0.727137 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api import httpbody_pb2 # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.recommendationengine_v1beta1.services.user_event_service import pagers
from google.cloud.recommendationengine_v1beta1.types import import_
from google.cloud.recommendationengine_v1beta1.types import user_event
from google.cloud.recommendationengine_v1beta1.types import user_event as gcr_user_event
from google.cloud.recommendationengine_v1beta1.types import user_event_service
from google.protobuf import any_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import UserEventServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import UserEventServiceGrpcTransport
from .transports.grpc_asyncio import UserEventServiceGrpcAsyncIOTransport
class UserEventServiceClientMeta(type):
    """Metaclass for the UserEventService client.

    Holds the transport registry and class-level helpers so that support
    objects (e.g. the transport) can be built and retrieved without
    polluting client instances.
    """
    _transport_registry = OrderedDict()  # type: Dict[str, Type[UserEventServiceTransport]]
    _transport_registry["grpc"] = UserEventServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = UserEventServiceGrpcAsyncIOTransport

    def get_transport_class(cls,
            label: str = None,
        ) -> Type[UserEventServiceTransport]:
        """Return the transport class registered under ``label``.

        Args:
            label: Name of the desired transport; when falsy, the first
                registered transport is returned.

        Returns:
            The transport class to use.
        """
        registry = cls._transport_registry
        # No explicit choice -- fall back to the first registered entry.
        if not label:
            return next(iter(registry.values()))
        return registry[label]
class UserEventServiceClient(metaclass=UserEventServiceClientMeta):
"""Service for ingesting end user actions on the customer
website.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "recommendationengine.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
UserEventServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
UserEventServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> UserEventServiceTransport:
"""Returns the transport used by the client instance.
Returns:
UserEventServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def event_store_path(project: str,location: str,catalog: str,event_store: str,) -> str:
"""Returns a fully-qualified event_store string."""
return "projects/{project}/locations/{location}/catalogs/{catalog}/eventStores/{event_store}".format(project=project, location=location, catalog=catalog, event_store=event_store, )
@staticmethod
def parse_event_store_path(path: str) -> Dict[str,str]:
"""Parses a event_store path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/catalogs/(?P<catalog>.+?)/eventStores/(?P<event_store>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str, ) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str,str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str, ) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder, )
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str,str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str, ) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization, )
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str,str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str, ) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project, )
@staticmethod
def parse_common_project_path(path: str) -> Dict[str,str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str, ) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(project=project, location=location, )
@staticmethod
def parse_common_location_path(path: str) -> Dict[str,str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
    def __init__(self, *,
            credentials: Optional[ga_credentials.Credentials] = None,
            transport: Union[str, UserEventServiceTransport, None] = None,
            client_options: Optional[client_options_lib.ClientOptions] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiates the user event service client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, UserEventServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Accept a plain dict of options for convenience; normalize to a
        # ClientOptions instance either way.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()

        # Create SSL credentials for mutual TLS if needed.
        # GOOGLE_API_USE_CLIENT_CERTIFICATE gates client certs entirely;
        # strtobool accepts "true"/"false"-style values.
        use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))

        client_cert_source_func = None
        is_mtls = False
        if use_client_cert:
            # An explicitly supplied cert source wins over the discovered
            # default client certificate.
            if client_options.client_cert_source:
                is_mtls = True
                client_cert_source_func = client_options.client_cert_source
            else:
                is_mtls = mtls.has_default_client_cert_source()
                if is_mtls:
                    client_cert_source_func = mtls.default_client_cert_source()
                else:
                    client_cert_source_func = None

        # Figure out which api endpoint to use. An explicit
        # client_options.api_endpoint always takes precedence; otherwise
        # GOOGLE_API_USE_MTLS_ENDPOINT (never/always/auto) decides.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                # "auto": use mTLS only when a client certificate is available.
                if is_mtls:
                    api_endpoint = self.DEFAULT_MTLS_ENDPOINT
                else:
                    api_endpoint = self.DEFAULT_ENDPOINT
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
                    "values: never, auto, always"
                )

        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, UserEventServiceTransport):
            # transport is a UserEventServiceTransport instance.
            # A pre-built transport already carries credentials/scopes, so
            # passing them here as well would be ambiguous.
            if credentials or client_options.credentials_file:
                raise ValueError("When providing a transport instance, "
                                 "provide its credentials directly.")
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            # Resolve the transport class by name (or default) and build it.
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
            )
def write_user_event(self,
request: Union[user_event_service.WriteUserEventRequest, dict] = None,
*,
parent: str = None,
user_event: gcr_user_event.UserEvent = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcr_user_event.UserEvent:
r"""Writes a single user event.
Args:
request (Union[google.cloud.recommendationengine_v1beta1.types.WriteUserEventRequest, dict]):
The request object. Request message for WriteUserEvent
method.
parent (str):
Required. The parent eventStore resource name, such as
``projects/1234/locations/global/catalogs/default_catalog/eventStores/default_event_store``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
user_event (google.cloud.recommendationengine_v1beta1.types.UserEvent):
Required. User event to write.
This corresponds to the ``user_event`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.recommendationengine_v1beta1.types.UserEvent:
UserEvent captures all metadata
information recommendation engine needs
to know about how end users interact
with customers' website.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, user_event])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a user_event_service.WriteUserEventRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, user_event_service.WriteUserEventRequest):
request = user_event_service.WriteUserEventRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if user_event is not None:
request.user_event = user_event
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.write_user_event]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def collect_user_event(self,
request: Union[user_event_service.CollectUserEventRequest, dict] = None,
*,
parent: str = None,
user_event: str = None,
uri: str = None,
ets: int = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> httpbody_pb2.HttpBody:
r"""Writes a single user event from the browser. This
uses a GET request to due to browser restriction of
POST-ing to a 3rd party domain.
This method is used only by the Recommendations AI
JavaScript pixel. Users should not call this method
directly.
Args:
request (Union[google.cloud.recommendationengine_v1beta1.types.CollectUserEventRequest, dict]):
The request object. Request message for CollectUserEvent
method.
parent (str):
Required. The parent eventStore name, such as
``projects/1234/locations/global/catalogs/default_catalog/eventStores/default_event_store``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
user_event (str):
Required. URL encoded UserEvent
proto.
This corresponds to the ``user_event`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
uri (str):
Optional. The url including cgi-
arameters but excluding the hash
fragment. The URL must be truncated to
1.5K bytes to conservatively be under
the 2K bytes. This is often more useful
than the referer url, because many
browsers only send the domain for 3rd
party requests.
This corresponds to the ``uri`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
ets (int):
Optional. The event timestamp in
milliseconds. This prevents browser
caching of otherwise identical get
requests. The name is abbreviated to
reduce the payload bytes.
This corresponds to the ``ets`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api.httpbody_pb2.HttpBody:
Message that represents an arbitrary HTTP body. It should only be used for
payload formats that can't be represented as JSON,
such as raw binary or an HTML page.
This message can be used both in streaming and
non-streaming API methods in the request as well as
the response.
It can be used as a top-level request field, which is
convenient if one wants to extract parameters from
either the URL or HTTP template into the request
fields and also want access to the raw HTTP body.
Example:
message GetResourceRequest {
// A unique request id. string request_id = 1;
// The raw HTTP body is bound to this field.
google.api.HttpBody http_body = 2;
}
service ResourceService {
rpc GetResource(GetResourceRequest)
returns (google.api.HttpBody);
rpc UpdateResource(google.api.HttpBody)
returns (google.protobuf.Empty);
}
Example with streaming methods:
service CaldavService {
rpc GetCalendar(stream google.api.HttpBody)
returns (stream google.api.HttpBody);
rpc UpdateCalendar(stream google.api.HttpBody)
returns (stream google.api.HttpBody);
}
Use of this type only changes how the request and
response bodies are handled, all other features will
continue to work unchanged.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, user_event, uri, ets])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a user_event_service.CollectUserEventRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, user_event_service.CollectUserEventRequest):
request = user_event_service.CollectUserEventRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if user_event is not None:
request.user_event = user_event
if uri is not None:
request.uri = uri
if ets is not None:
request.ets = ets
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.collect_user_event]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_user_events(self,
request: Union[user_event_service.ListUserEventsRequest, dict] = None,
*,
parent: str = None,
filter: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListUserEventsPager:
r"""Gets a list of user events within a time range, with
potential filtering.
Args:
request (Union[google.cloud.recommendationengine_v1beta1.types.ListUserEventsRequest, dict]):
The request object. Request message for ListUserEvents
method.
parent (str):
Required. The parent eventStore resource name, such as
``projects/*/locations/*/catalogs/default_catalog/eventStores/default_event_store``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
filter (str):
Optional. Filtering expression to specify restrictions
over returned events. This is a sequence of terms, where
each term applies some kind of a restriction to the
returned user events. Use this expression to restrict
results to a specific time range, or filter events by
eventType. eg: eventTime > "2012-04-23T18:25:43.511Z"
eventsMissingCatalogItems
eventTime<"2012-04-23T18:25:43.511Z" eventType=search
We expect only 3 types of fields:
::
* eventTime: this can be specified a maximum of 2 times, once with a
less than operator and once with a greater than operator. The
eventTime restrict should result in one contiguous valid eventTime
range.
* eventType: only 1 eventType restriction can be specified.
* eventsMissingCatalogItems: specififying this will restrict results
to events for which catalog items were not found in the catalog. The
default behavior is to return only those events for which catalog
items were found.
Some examples of valid filters expressions:
- Example 1: eventTime > "2012-04-23T18:25:43.511Z"
eventTime < "2012-04-23T18:30:43.511Z"
- Example 2: eventTime > "2012-04-23T18:25:43.511Z"
eventType = detail-page-view
- Example 3: eventsMissingCatalogItems eventType =
search eventTime < "2018-04-23T18:30:43.511Z"
- Example 4: eventTime > "2012-04-23T18:25:43.511Z"
- Example 5: eventType = search
- Example 6: eventsMissingCatalogItems
This corresponds to the ``filter`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.recommendationengine_v1beta1.services.user_event_service.pagers.ListUserEventsPager:
Response message for ListUserEvents
method.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, filter])
if request is not None and has_flattened_params:
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a user_event_service.ListUserEventsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, user_event_service.ListUserEventsRequest):
request = user_event_service.ListUserEventsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if filter is not None:
request.filter = filter
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_user_events]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListUserEventsPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def purge_user_events(self,
        request: Union[user_event_service.PurgeUserEventsRequest, dict] = None,
        *,
        parent: str = None,
        filter: str = None,
        force: bool = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> operation.Operation:
    r"""Deletes permanently all user events specified by the
    filter provided. Depending on the number of events
    specified by the filter, this operation could take hours
    or days to complete. To test a filter, use the list
    command first.

    Args:
        request (Union[google.cloud.recommendationengine_v1beta1.types.PurgeUserEventsRequest, dict]):
            The request object. Request message for PurgeUserEvents
            method.
        parent (str):
            Required. The resource name of the event_store under
            which the events are created. The format is
            ``projects/${projectId}/locations/global/catalogs/${catalogId}/eventStores/${eventStoreId}``
            This corresponds to the ``parent`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        filter (str):
            Required. The filter string to specify the events to be
            deleted. Empty string filter is not allowed. This filter
            can also be used with ListUserEvents API to list events
            that will be deleted. The eligible fields for filtering
            are:
            - eventType - UserEvent.eventType field of type string.
            - eventTime - in ISO 8601 "zulu" format.
            - visitorId - field of type string. Specifying this
              will delete all events associated with a visitor.
            - userId - field of type string. Specifying this will
              delete all events associated with a user. Example 1:
              Deleting all events in a time range.
              ``eventTime > "2012-04-23T18:25:43.511Z" eventTime < "2012-04-23T18:30:43.511Z"``
              Example 2: Deleting specific eventType in time range.
              ``eventTime > "2012-04-23T18:25:43.511Z" eventType = "detail-page-view"``
              Example 3: Deleting all events for a specific visitor
              ``visitorId = visitor1024`` The filtering fields are
              assumed to have an implicit AND.
            This corresponds to the ``filter`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        force (bool):
            Optional. The default value is false.
            Override this flag to true to actually
            perform the purge. If the field is not
            set to true, a sampling of events to be
            deleted will be returned.
            This corresponds to the ``force`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.api_core.operation.Operation:
            An object representing a long-running operation.
            The result type for the operation will be :class:`google.cloud.recommendationengine_v1beta1.types.PurgeUserEventsResponse` Response of the PurgeUserEventsRequest. If the long running operation is
            successfully done, then this message is returned by
            the google.longrunning.Operations.response field.

    Raises:
        ValueError: If ``request`` is provided together with any of the
            flattened field arguments (``parent``, ``filter``, ``force``).
    """
    # Create or coerce a protobuf request object.
    # Sanity check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    # Compare against None rather than truthiness so that explicitly
    # passed falsy values (e.g. force=False or filter="") are still
    # detected as set; otherwise they would silently bypass this check
    # and then overwrite the corresponding field on `request` below.
    has_flattened_params = any(arg is not None for arg in (parent, filter, force))
    if request is not None and has_flattened_params:
        raise ValueError('If the `request` argument is set, then none of '
                         'the individual field arguments should be set.')
    # Minor optimization to avoid making a copy if the user passes
    # in a user_event_service.PurgeUserEventsRequest.
    # There's no risk of modifying the input as we've already verified
    # there are no flattened fields.
    if not isinstance(request, user_event_service.PurgeUserEventsRequest):
        request = user_event_service.PurgeUserEventsRequest(request)
    # If we have keyword arguments corresponding to fields on the
    # request, apply these.
    if parent is not None:
        request.parent = parent
    if filter is not None:
        request.filter = filter
    if force is not None:
        request.force = force
    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.purge_user_events]
    # Certain fields should be provided within the metadata header;
    # add these here (the routing header tells the backend which
    # resource the request targets).
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((
            ("parent", request.parent),
        )),
    )
    # Send the request.
    response = rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    # Wrap the raw long-running-operation response in an operation
    # future so callers can poll / block on completion.
    response = operation.from_gapic(
        response,
        self._transport.operations_client,
        user_event_service.PurgeUserEventsResponse,
        metadata_type=user_event_service.PurgeUserEventsMetadata,
    )
    # Done; return the response.
    return response
def import_user_events(self,
        request: Union[import_.ImportUserEventsRequest, dict] = None,
        *,
        parent: str = None,
        request_id: str = None,
        input_config: import_.InputConfig = None,
        errors_config: import_.ImportErrorsConfig = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> operation.Operation:
    r"""Bulk import of User events. Request processing might
    be synchronous. Events that already exist are skipped.
    Use this method for backfilling historical user events.
    Operation.response is of type ImportResponse. Note that
    it is possible for a subset of the items to be
    successfully inserted. Operation.metadata is of type
    ImportMetadata.

    Args:
        request (Union[google.cloud.recommendationengine_v1beta1.types.ImportUserEventsRequest, dict]):
            The request object. Request message for the
            ImportUserEvents request.
        parent (str):
            Required.
            ``projects/1234/locations/global/catalogs/default_catalog/eventStores/default_event_store``
            This corresponds to the ``parent`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        request_id (str):
            Optional. Unique identifier provided by client, within
            the ancestor dataset scope. Ensures idempotency for
            expensive long running operations. Server-generated if
            unspecified. Up to 128 characters long. This is returned
            as google.longrunning.Operation.name in the response.
            Note that this field must not be set if the desired
            input config is catalog_inline_source.
            This corresponds to the ``request_id`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        input_config (google.cloud.recommendationengine_v1beta1.types.InputConfig):
            Required. The desired input location
            of the data.
            This corresponds to the ``input_config`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        errors_config (google.cloud.recommendationengine_v1beta1.types.ImportErrorsConfig):
            Optional. The desired location of
            errors incurred during the Import.
            This corresponds to the ``errors_config`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.api_core.operation.Operation:
            An object representing a long-running operation.
            The result type for the operation will be :class:`google.cloud.recommendationengine_v1beta1.types.ImportUserEventsResponse` Response of the ImportUserEventsRequest. If the long running
            operation was successful, then this message is
            returned by the
            google.longrunning.Operations.response field if the
            operation was successful.

    Raises:
        ValueError: If ``request`` is provided together with any of the
            flattened field arguments (``parent``, ``request_id``,
            ``input_config``, ``errors_config``).
    """
    # Create or coerce a protobuf request object.
    # Sanity check: If we got a request object, we should *not* have
    # gotten any keyword arguments that map to the request.
    # Compare against None rather than truthiness so that explicitly
    # passed falsy values (e.g. request_id="") are still detected as
    # set; otherwise they would silently bypass this check and then
    # overwrite the corresponding field on `request` below.
    has_flattened_params = any(
        arg is not None
        for arg in (parent, request_id, input_config, errors_config)
    )
    if request is not None and has_flattened_params:
        raise ValueError('If the `request` argument is set, then none of '
                         'the individual field arguments should be set.')
    # Minor optimization to avoid making a copy if the user passes
    # in a import_.ImportUserEventsRequest.
    # There's no risk of modifying the input as we've already verified
    # there are no flattened fields.
    if not isinstance(request, import_.ImportUserEventsRequest):
        request = import_.ImportUserEventsRequest(request)
    # If we have keyword arguments corresponding to fields on the
    # request, apply these.
    if parent is not None:
        request.parent = parent
    if request_id is not None:
        request.request_id = request_id
    if input_config is not None:
        request.input_config = input_config
    if errors_config is not None:
        request.errors_config = errors_config
    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.import_user_events]
    # Certain fields should be provided within the metadata header;
    # add these here (the routing header tells the backend which
    # resource the request targets).
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((
            ("parent", request.parent),
        )),
    )
    # Send the request.
    response = rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    # Wrap the raw long-running-operation response in an operation
    # future so callers can poll / block on completion.
    response = operation.from_gapic(
        response,
        self._transport.operations_client,
        import_.ImportUserEventsResponse,
        metadata_type=import_.ImportMetadata,
    )
    # Done; return the response.
    return response
def __enter__(self):
    """Return the client itself so it can be used as a context manager."""
    return self
def __exit__(self, type, value, traceback):
    """Releases underlying transport's resources.

    .. warning::
        ONLY use as a context manager if the transport is NOT shared
        with other clients! Exiting the with block will CLOSE the transport
        and may cause errors in other clients!
    """
    # The exception info (type/value/traceback) is intentionally ignored:
    # the transport is closed unconditionally and any in-flight exception
    # propagates to the caller (nothing is returned, i.e. None/falsy).
    self.transport.close()
# Attach the installed distribution's version to the client-info metadata
# that is sent with every request (user-agent string).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-recommendations-ai",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Package metadata is unavailable (e.g. running from a source checkout);
    # fall back to a ClientInfo without a gapic_version.
    # NOTE(review): pkg_resources is deprecated upstream — presumably
    # importlib.metadata could replace it; confirm before changing.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
# Public API of this module.
__all__ = (
    "UserEventServiceClient",
)
(source file: google/cloud/recommendationengine/v1beta1/recommendationengine-v1beta1-py/google/cloud/recommendationengine_v1beta1/services/user_event_service/client.py, 45,048 bytes)
Service for ingesting end user actions on the customer
website.
Metaclass for the UserEventService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
Instantiates the user event service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, UserEventServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
Writes a single user event from the browser. This
uses a GET request to due to browser restriction of
POST-ing to a 3rd party domain.
This method is used only by the Recommendations AI
JavaScript pixel. Users should not call this method
directly.
Args:
request (Union[google.cloud.recommendationengine_v1beta1.types.CollectUserEventRequest, dict]):
The request object. Request message for CollectUserEvent
method.
parent (str):
Required. The parent eventStore name, such as
``projects/1234/locations/global/catalogs/default_catalog/eventStores/default_event_store``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
user_event (str):
Required. URL encoded UserEvent
proto.
This corresponds to the ``user_event`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
uri (str):
                        Optional. The URL including cgi-
                        parameters but excluding the hash
fragment. The URL must be truncated to
1.5K bytes to conservatively be under
the 2K bytes. This is often more useful
than the referer url, because many
browsers only send the domain for 3rd
party requests.
This corresponds to the ``uri`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
ets (int):
Optional. The event timestamp in
milliseconds. This prevents browser
caching of otherwise identical get
requests. The name is abbreviated to
reduce the payload bytes.
This corresponds to the ``ets`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api.httpbody_pb2.HttpBody:
Message that represents an arbitrary HTTP body. It should only be used for
payload formats that can't be represented as JSON,
such as raw binary or an HTML page.
This message can be used both in streaming and
non-streaming API methods in the request as well as
the response.
It can be used as a top-level request field, which is
convenient if one wants to extract parameters from
either the URL or HTTP template into the request
fields and also want access to the raw HTTP body.
Example:
message GetResourceRequest {
// A unique request id. string request_id = 1;
// The raw HTTP body is bound to this field.
google.api.HttpBody http_body = 2;
}
service ResourceService {
rpc GetResource(GetResourceRequest)
returns (google.api.HttpBody);
rpc UpdateResource(google.api.HttpBody)
returns (google.protobuf.Empty);
}
Example with streaming methods:
service CaldavService {
rpc GetCalendar(stream google.api.HttpBody)
returns (stream google.api.HttpBody);
rpc UpdateCalendar(stream google.api.HttpBody)
returns (stream google.api.HttpBody);
}
Use of this type only changes how the request and
response bodies are handled, all other features will
continue to work unchanged.
Returns a fully-qualified billing_account string.
Returns a fully-qualified folder string.
Returns a fully-qualified location string.
Returns a fully-qualified organization string.
Returns a fully-qualified project string.
Returns a fully-qualified event_store string.
Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
UserEventServiceClient: The constructed client.
Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
UserEventServiceClient: The constructed client.
Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
Bulk import of User events. Request processing might
be synchronous. Events that already exist are skipped.
Use this method for backfilling historical user events.
Operation.response is of type ImportResponse. Note that
it is possible for a subset of the items to be
successfully inserted. Operation.metadata is of type
ImportMetadata.
Args:
request (Union[google.cloud.recommendationengine_v1beta1.types.ImportUserEventsRequest, dict]):
The request object. Request message for the
ImportUserEvents request.
parent (str):
Required.
``projects/1234/locations/global/catalogs/default_catalog/eventStores/default_event_store``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
request_id (str):
Optional. Unique identifier provided by client, within
the ancestor dataset scope. Ensures idempotency for
expensive long running operations. Server-generated if
unspecified. Up to 128 characters long. This is returned
as google.longrunning.Operation.name in the response.
Note that this field must not be set if the desired
input config is catalog_inline_source.
This corresponds to the ``request_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
input_config (google.cloud.recommendationengine_v1beta1.types.InputConfig):
Required. The desired input location
of the data.
This corresponds to the ``input_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
errors_config (google.cloud.recommendationengine_v1beta1.types.ImportErrorsConfig):
Optional. The desired location of
errors incurred during the Import.
This corresponds to the ``errors_config`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.recommendationengine_v1beta1.types.ImportUserEventsResponse` Response of the ImportUserEventsRequest. If the long running
operation was successful, then this message is
returned by the
google.longrunning.Operations.response field if the
operation was successful.
Gets a list of user events within a time range, with
potential filtering.
Args:
request (Union[google.cloud.recommendationengine_v1beta1.types.ListUserEventsRequest, dict]):
The request object. Request message for ListUserEvents
method.
parent (str):
Required. The parent eventStore resource name, such as
``projects/*/locations/*/catalogs/default_catalog/eventStores/default_event_store``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
filter (str):
Optional. Filtering expression to specify restrictions
over returned events. This is a sequence of terms, where
each term applies some kind of a restriction to the
returned user events. Use this expression to restrict
results to a specific time range, or filter events by
eventType. eg: eventTime > "2012-04-23T18:25:43.511Z"
eventsMissingCatalogItems
eventTime<"2012-04-23T18:25:43.511Z" eventType=search
We expect only 3 types of fields:
::
* eventTime: this can be specified a maximum of 2 times, once with a
less than operator and once with a greater than operator. The
eventTime restrict should result in one contiguous valid eventTime
range.
* eventType: only 1 eventType restriction can be specified.
                        * eventsMissingCatalogItems: specifying this will restrict results
to events for which catalog items were not found in the catalog. The
default behavior is to return only those events for which catalog
items were found.
Some examples of valid filters expressions:
- Example 1: eventTime > "2012-04-23T18:25:43.511Z"
eventTime < "2012-04-23T18:30:43.511Z"
- Example 2: eventTime > "2012-04-23T18:25:43.511Z"
eventType = detail-page-view
- Example 3: eventsMissingCatalogItems eventType =
search eventTime < "2018-04-23T18:30:43.511Z"
- Example 4: eventTime > "2012-04-23T18:25:43.511Z"
- Example 5: eventType = search
- Example 6: eventsMissingCatalogItems
This corresponds to the ``filter`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.recommendationengine_v1beta1.services.user_event_service.pagers.ListUserEventsPager:
Response message for ListUserEvents
method.
Iterating over this object will yield
results and resolve additional pages
automatically.
Parse a billing_account path into its component segments.
Parse a folder path into its component segments.
Parse a location path into its component segments.
Parse a organization path into its component segments.
Parse a project path into its component segments.
Parses a event_store path into its component segments.
Deletes permanently all user events specified by the
filter provided. Depending on the number of events
specified by the filter, this operation could take hours
or days to complete. To test a filter, use the list
command first.
Args:
request (Union[google.cloud.recommendationengine_v1beta1.types.PurgeUserEventsRequest, dict]):
The request object. Request message for PurgeUserEvents
method.
parent (str):
Required. The resource name of the event_store under
which the events are created. The format is
``projects/${projectId}/locations/global/catalogs/${catalogId}/eventStores/${eventStoreId}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
filter (str):
Required. The filter string to specify the events to be
deleted. Empty string filter is not allowed. This filter
can also be used with ListUserEvents API to list events
that will be deleted. The eligible fields for filtering
are:
- eventType - UserEvent.eventType field of type string.
- eventTime - in ISO 8601 "zulu" format.
- visitorId - field of type string. Specifying this
will delete all events associated with a visitor.
- userId - field of type string. Specifying this will
delete all events associated with a user. Example 1:
Deleting all events in a time range.
``eventTime > "2012-04-23T18:25:43.511Z" eventTime < "2012-04-23T18:30:43.511Z"``
Example 2: Deleting specific eventType in time range.
``eventTime > "2012-04-23T18:25:43.511Z" eventType = "detail-page-view"``
Example 3: Deleting all events for a specific visitor
``visitorId = visitor1024`` The filtering fields are
assumed to have an implicit AND.
This corresponds to the ``filter`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
force (bool):
Optional. The default value is false.
Override this flag to true to actually
perform the purge. If the field is not
set to true, a sampling of events to be
deleted will be returned.
This corresponds to the ``force`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.recommendationengine_v1beta1.types.PurgeUserEventsResponse` Response of the PurgeUserEventsRequest. If the long running operation is
successfully done, then this message is returned by
the google.longrunning.Operations.response field.
Returns the transport used by the client instance.
Returns:
UserEventServiceTransport: The transport used by the client
instance.
Writes a single user event.
Args:
request (Union[google.cloud.recommendationengine_v1beta1.types.WriteUserEventRequest, dict]):
The request object. Request message for WriteUserEvent
method.
parent (str):
Required. The parent eventStore resource name, such as
``projects/1234/locations/global/catalogs/default_catalog/eventStores/default_event_store``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
user_event (google.cloud.recommendationengine_v1beta1.types.UserEvent):
Required. User event to write.
This corresponds to the ``user_event`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.recommendationengine_v1beta1.types.UserEvent:
UserEvent captures all metadata
information recommendation engine needs
to know about how end users interact
with customers' website.
-*- coding: utf-8 -*- Copyright 2020 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore type: ignore type: Dict[str, Type[UserEventServiceTransport]] If a specific transport is requested, return that one. No transport is requested; return the default (that is, the first one in the dictionary). type: ignore Create SSL credentials for mutual TLS if needed. Figure out which api endpoint to use. Save or instantiate the transport. Ordinarily, we provide the transport, but allowing a custom transport instance provides an extensibility point for unusual situations. transport is a UserEventServiceTransport instance. Create or coerce a protobuf request object. Sanity check: If we got a request object, we should *not* have gotten any keyword arguments that map to the request. Minor optimization to avoid making a copy if the user passes in a user_event_service.WriteUserEventRequest. There's no risk of modifying the input as we've already verified there are no flattened fields. If we have keyword arguments corresponding to fields on the request, apply these. Wrap the RPC method; this adds retry and timeout information, and friendly error handling. Certain fields should be provided within the metadata header; add these here. Send the request. Done; return the response. Create or coerce a protobuf request object. 
Sanity check: If we got a request object, we should *not* have gotten any keyword arguments that map to the request. Minor optimization to avoid making a copy if the user passes in a user_event_service.CollectUserEventRequest. There's no risk of modifying the input as we've already verified there are no flattened fields. If we have keyword arguments corresponding to fields on the request, apply these. Wrap the RPC method; this adds retry and timeout information, and friendly error handling. Certain fields should be provided within the metadata header; add these here. Send the request. Done; return the response. Create or coerce a protobuf request object. Sanity check: If we got a request object, we should *not* have gotten any keyword arguments that map to the request. Minor optimization to avoid making a copy if the user passes in a user_event_service.ListUserEventsRequest. There's no risk of modifying the input as we've already verified there are no flattened fields. If we have keyword arguments corresponding to fields on the request, apply these. Wrap the RPC method; this adds retry and timeout information, and friendly error handling. Certain fields should be provided within the metadata header; add these here. Send the request. This method is paged; wrap the response in a pager, which provides an `__iter__` convenience method. Done; return the response. Create or coerce a protobuf request object. Sanity check: If we got a request object, we should *not* have gotten any keyword arguments that map to the request. Minor optimization to avoid making a copy if the user passes in a user_event_service.PurgeUserEventsRequest. There's no risk of modifying the input as we've already verified there are no flattened fields. If we have keyword arguments corresponding to fields on the request, apply these. Wrap the RPC method; this adds retry and timeout information, and friendly error handling. Certain fields should be provided within the metadata header; add these here. 
Send the request. Wrap the response in an operation future. Done; return the response. Create or coerce a protobuf request object. Sanity check: If we got a request object, we should *not* have gotten any keyword arguments that map to the request. Minor optimization to avoid making a copy if the user passes in a import_.ImportUserEventsRequest. There's no risk of modifying the input as we've already verified there are no flattened fields. If we have keyword arguments corresponding to fields on the request, apply these. Wrap the RPC method; this adds retry and timeout information, and friendly error handling. Certain fields should be provided within the metadata header; add these here. Send the request. Wrap the response in an operation future. Done; return the response. | 23,188 | en | 0.752753 |
# -*- coding: utf-8 -*-
import os
import re
from math import radians, degrees
import numpy as np
import pandas as pd
import cv2
from pdftabextract import imgproc
from pdftabextract.geom import pt
from pdftabextract.common import read_xml, parse_pages, save_page_grids
from pdftabextract.textboxes import rotate_textboxes, sorted_by_attr
from pdftabextract.clustering import (find_clusters_1d_break_dist,
calc_cluster_centers_1d,
zip_clusters_and_values)
from pdftabextract.splitpages import split_page_texts, create_split_pages_dict_structure
from pdftabextract.extract import make_grid_from_positions, fit_texts_into_grid, datatable_to_dataframe
#%% Some constants
#DATAPATH = 'data/'
#DATAPATH = 'ip/'
#OUTPUTPATH = 'generated_output/'
#OUTPUTPATH = 'op/'
#INPUT_XML = 'output.xml'
# Input/output locations for this extraction run.
DATAPATH = 'data/'          # directory containing the pdftohtml XML and page images
OUTPUTPATH = 'generated_output/'  # directory where result images/grids are written
INPUT_XML = 'schoollist_1.pdf.xml'  # XML produced by `pdftohtml -xml`
MIN_ROW_HEIGHT = 260 # minimum height of a row in pixels, measured in the scanned pages
MIN_COL_WIDTH = 194 # very important. the minimum width of a column in pixels, measured in the scanned pages
#%% Some helper functions
def save_image_w_lines(iproc_obj, imgfilebasename, orig_img_as_background, file_suffix_prefix=''):
    """Render the lines detected by *iproc_obj* and write them to a PNG in OUTPUTPATH.

    When ``orig_img_as_background`` is true the lines are drawn on top of the
    scanned page image, otherwise on a blank canvas; the choice is reflected in
    the output file name suffix.
    """
    suffix = file_suffix_prefix + ('lines-orig' if orig_img_as_background else 'lines')
    rendered = iproc_obj.draw_lines(orig_img_as_background=orig_img_as_background)
    out_file = os.path.join(OUTPUTPATH, '%s-%s.png' % (imgfilebasename, suffix))
    print("> saving image with detected lines to '%s'" % out_file)
    cv2.imwrite(out_file, rendered)
#%% Read the XML
# Load the XML that was generated with pdftohtml
xmltree, xmlroot = read_xml(os.path.join(DATAPATH, INPUT_XML))
# parse it and generate a dict of pages (page number -> page dict with image, dimensions and text boxes)
pages = parse_pages(xmlroot, require_image=True)
#%% Split the scanned double pages so that we can later process the lists page-by-page
split_texts_and_images = [] # list of tuples with (double page, split text boxes, split images)
for p_num, p in pages.items():
    # get the image file of the scanned page
    imgfilebasename = p['image'][:p['image'].rindex('.')]
    imgfile = os.path.join(DATAPATH, p['image'])
    print("page %d: detecting lines in image file '%s'..." % (p_num, imgfile))
    # create an image processing object with the scanned page
    iproc_obj = imgproc.ImageProc(imgfile)
    # calculate the scaling of the image file in relation to the text boxes coordinate system dimensions
    page_scaling_x = iproc_obj.img_w / p['width']
    page_scaling_y = iproc_obj.img_h / p['height']
    # NOTE(review): image_scaling is never read afterwards (the later loop
    # recomputes and stores scalings in pages_image_scaling) — candidate for removal.
    image_scaling = (page_scaling_x, # scaling in X-direction
                     page_scaling_y) # scaling in Y-direction
    # detect the lines in the double pages
    lines_hough = iproc_obj.detect_lines(canny_low_thresh=50, canny_high_thresh=150, canny_kernel_size=3,
                                         hough_rho_res=1,
                                         hough_theta_res=np.pi/500,
                                         hough_votes_thresh=350)
    print("> found %d lines" % len(lines_hough))
    save_image_w_lines(iproc_obj, imgfilebasename, True, 'bothpages-')
    # find the vertical line that separates both sides of the double page
    sep_line_img_x = iproc_obj.find_pages_separator_line(dist_thresh=MIN_COL_WIDTH/2)
    sep_line_page_x = sep_line_img_x / page_scaling_x
    print("> found pages separator line at %f (image space position) / %f (page space position)"
          % (sep_line_img_x, sep_line_page_x))
    # split the scanned double page at the separator line
    split_images = iproc_obj.split_image(sep_line_img_x)
    # split the textboxes at the separator line
    split_texts = split_page_texts(p, sep_line_page_x)
    split_texts_and_images.append((p, split_texts, split_images))
# generate a new XML and "pages" dict structure from the split pages
split_pages_xmlfile = os.path.join(OUTPUTPATH, INPUT_XML[:INPUT_XML.rindex('.')] + '.split.xml')
print("> saving split pages XML to '%s'" % split_pages_xmlfile)
split_tree, split_root, split_pages = create_split_pages_dict_structure(split_texts_and_images,
                                                                        save_to_output_path=split_pages_xmlfile)
# we don't need the original double pages any more, we'll work with 'split_pages'
del pages
#%% Detect clusters of horizontal lines using the image processing module and rotate back or deskew pages
hori_lines_clusters = {}
pages_image_scaling = {} # scaling of the scanned page image in relation to the OCR page dimensions for each page
for p_num, p in split_pages.items():
    # get the image file of the scanned page
    imgfilebasename = p['image'][:p['image'].rindex('.')]
    imgfile = os.path.join(OUTPUTPATH, p['image'])
    print("page %d: detecting lines in image file '%s'..." % (p_num, imgfile))
    # create an image processing object with the scanned page
    iproc_obj = imgproc.ImageProc(imgfile)
    # calculate the scaling of the image file in relation to the text boxes coordinate system dimensions
    page_scaling_x = iproc_obj.img_w / p['width']
    page_scaling_y = iproc_obj.img_h / p['height']
    pages_image_scaling[p_num] = (page_scaling_x, # scaling in X-direction
                                  page_scaling_y) # scaling in Y-direction
    # detect the lines; the votes threshold scales with the page width
    lines_hough = iproc_obj.detect_lines(canny_low_thresh=50, canny_high_thresh=150, canny_kernel_size=3,
                                         hough_rho_res=1,
                                         hough_theta_res=np.pi/500,
                                         hough_votes_thresh=round(0.2 * iproc_obj.img_w))
    print("> found %d lines" % len(lines_hough))
    save_image_w_lines(iproc_obj, imgfilebasename, True)
    save_image_w_lines(iproc_obj, imgfilebasename, False)
    # find rotation or skew
    # the parameters are:
    # 1. the minimum threshold in radians for a rotation to be counted as such
    # 2. the maximum threshold for the difference between horizontal and vertical line rotation (to detect skew)
    # 3. an optional threshold to filter out "stray" lines whose angle is too far apart from the median angle of
    #    all other lines that go in the same direction (no effect here)
    rot_or_skew_type, rot_or_skew_radians = iproc_obj.find_rotation_or_skew(radians(0.5), # uses "lines_hough"
                                                                            radians(1),
                                                                            omit_on_rot_thresh=radians(0.5))
    # rotate back text boxes
    # since often no vertical lines can be detected and hence it cannot be determined if the page is rotated or skewed,
    # we assume that it's always rotated
    if rot_or_skew_type is not None:
        print("> rotating back by %f°" % -degrees(rot_or_skew_radians))
        rotate_textboxes(p, -rot_or_skew_radians, pt(0, 0))
        # rotate back detected lines
        lines_hough = iproc_obj.apply_found_rotation_or_skew(rot_or_skew_type, -rot_or_skew_radians)
        save_image_w_lines(iproc_obj, imgfilebasename + '-repaired', True)
        save_image_w_lines(iproc_obj, imgfilebasename + '-repaired', False)
    # cluster the detected *horizontal* lines using find_clusters_1d_break_dist as simple clustering function
    # (break on distance MIN_ROW_HEIGHT/2)
    # additionally, remove all cluster sections that are considered empty
    # a cluster is considered empty when the number of text boxes in it is below 10% of the median number of text boxes
    # per cluster section
    hori_clusters = iproc_obj.find_clusters(imgproc.DIRECTION_HORIZONTAL, find_clusters_1d_break_dist,
                                            remove_empty_cluster_sections_use_texts=p['texts'], # use this page's textboxes
                                            remove_empty_cluster_sections_n_texts_ratio=0.1, # 10% rule
                                            remove_empty_cluster_sections_scaling=page_scaling_y, # the positions are in "scanned image space" -> we scale them to "text box space"
                                            dist_thresh=MIN_ROW_HEIGHT/2)
    print("> found %d clusters" % len(hori_clusters))
    if len(hori_clusters) > 0:
        # draw the clusters
        img_w_clusters = iproc_obj.draw_line_clusters(imgproc.DIRECTION_HORIZONTAL, hori_clusters)
        save_img_file = os.path.join(OUTPUTPATH, '%s-hori-clusters.png' % imgfilebasename)
        print("> saving image with detected horizontal clusters to '%s'" % save_img_file)
        cv2.imwrite(save_img_file, img_w_clusters)
        hori_lines_clusters[p_num] = hori_clusters
    else:
        print("> no horizontal line clusters found")
# save split and repaired XML (i.e. XML with deskewed textbox positions)
output_files_basename = INPUT_XML[:INPUT_XML.rindex('.')]
repaired_xmlfile = os.path.join(OUTPUTPATH, output_files_basename + '.split.repaired.xml')
print("saving split and repaired XML file to '%s'..." % repaired_xmlfile)
split_tree.write(repaired_xmlfile)
#%% Determine the rows and columns of the tables
pttrn_schoolnum = re.compile(r'^\d{6}$') # a valid school number indicates a table row
page_grids = {}
print("detecting rows and columns...")
for p_num, p in split_pages.items():
    scaling_x, scaling_y = pages_image_scaling[p_num]
    # try to find out the table rows in this page using the horizontal lines that were detected before
    hori_lines = list(np.array(calc_cluster_centers_1d(hori_lines_clusters[p_num])) / scaling_y)
    hori_lines.append(p['height']) # last line: page bottom
    prev_line_y = 0
    row_texts = []
    row_positions = []
    in_table = False # is True when the current segment is a real table row (not a table header or surrounding text)
    for line_y in hori_lines:
        # get all texts in this row (i.e. between the previous and the current horizontal line)
        segment_texts = [t for t in p['texts'] if prev_line_y < t['bottom'] <= line_y]
        if not segment_texts: continue # skip empty rows
        # try to find the start and the end of the table
        for t in segment_texts:
            t_val = t['value'].strip()
            if pttrn_schoolnum.search(t_val): # if this matches, we found the start of the table
                if not in_table:
                    in_table = True
                    row_positions.append(prev_line_y)
                break
        else:
            # no school number in this segment -> we left the table (for-else: loop ended without break)
            if in_table: # we found the end of the table
                in_table = False
        if in_table: # this is a table row, so add the texts and row positions to the respective lists
            row_texts.append(segment_texts)
            row_positions.append(line_y)
        prev_line_y = line_y
    # try to find out the table columns in this page using the distribution of x-coordinates of the left position of
    # each text box in all rows
    text_xs = []
    for texts in row_texts:
        text_xs.extend([t['left'] for t in texts])
    text_xs = np.array(text_xs)
    # make clusters of x positions
    text_xs_clusters = find_clusters_1d_break_dist(text_xs, dist_thresh=MIN_COL_WIDTH/2/scaling_x)
    text_xs_clusters_w_values = zip_clusters_and_values(text_xs_clusters, text_xs)
    col_positions = calc_cluster_centers_1d(text_xs_clusters_w_values)
    # remove falsely identified columns (i.e. merge columns with only a few text boxes)
    filtered_col_positions = []
    n_rows = len(row_positions)
    n_cols = len(col_positions)
    if n_cols > 1 and n_rows > 1:
        top_y = row_positions[0]
        bottom_y = row_positions[-1]
        # append the rightmost text's right border as the last column border
        rightmost_pos = sorted_by_attr(p['texts'], 'right')[-1]['right']
        col_positions.append(rightmost_pos)
        # merge columns with few text boxes
        texts_in_table = [t for t in p['texts'] if top_y < t['top'] + t['height']/2 <= bottom_y]
        prev_col_x = col_positions[0]
        for col_x in col_positions[1:]:
            col_texts = [t for t in texts_in_table if prev_col_x < t['left'] + t['width']/2 <= col_x]
            if len(col_texts) >= n_rows: # there should be at least one text box per row
                filtered_col_positions.append(prev_col_x)
                # NOTE(review): last_col_x is never read afterwards — dead store, candidate for removal.
                last_col_x = col_x
            prev_col_x = col_x
        # manually add border for the last column because it has very few or no text boxes
        filtered_col_positions.append(filtered_col_positions[-1] + (rightmost_pos - filtered_col_positions[-1]) / 2)
        filtered_col_positions.append(rightmost_pos)
    # create the grid
    if filtered_col_positions:
        grid = make_grid_from_positions(filtered_col_positions, row_positions)
        n_rows = len(grid)
        n_cols = len(grid[0])
        print("> page %d: grid with %d rows, %d columns" % (p_num, n_rows, n_cols))
        page_grids[p_num] = grid
    else: # this happens for the first page as there's no table on that
        print("> page %d: no table found" % p_num)
# save the page grids
# After you created the page grids, you should then check that they're correct using pdf2xml-viewer's
# loadGridFile() function
page_grids_file = os.path.join(OUTPUTPATH, output_files_basename + '.pagegrids.json')
print("saving page grids JSON file to '%s'" % page_grids_file)
save_page_grids(page_grids, page_grids_file)
#%% Create data frames (requires pandas library)
# For sake of simplicity, we will just fit the text boxes into the grid, merge the texts in their cells (splitting text
# boxes to separate lines if necessary) and output the result. Normally, you would do some more parsing here, e.g.
# extracting the address components from the second column.
# Collect one DataFrame per page and concatenate once at the end.
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0, and
# appending inside the loop copied the accumulated frame on every iteration.
page_dfs = []
print("fitting text boxes into page grids and generating final output...")
for p_num, p in split_pages.items():
    if p_num not in page_grids: continue # happens when no table was detected
    print("> page %d" % p_num)
    datatable, unmatched_texts = fit_texts_into_grid(p['texts'], page_grids[p_num], return_unmatched_texts=True)
    df = datatable_to_dataframe(datatable, split_texts_in_lines=True)
    df['from_page'] = p_num
    page_dfs.append(df)
# keep an empty frame when no page produced a table, matching the old behavior
full_df = pd.concat(page_dfs, ignore_index=True) if page_dfs else pd.DataFrame()
print("extracted %d rows from %d pages" % (len(full_df), len(split_pages)))
csv_output_file = os.path.join(OUTPUTPATH, output_files_basename + '.csv')
print("saving extracted data to '%s'" % csv_output_file)
full_df.to_csv(csv_output_file, index=False)
excel_output_file = os.path.join(OUTPUTPATH, output_files_basename + '.xlsx')
print("saving extracted data to '%s'" % excel_output_file)
full_df.to_excel(excel_output_file, index=False)
| examples/eg1/eg1.py | 14,963 | -*- coding: utf-8 -*-%% Some constantsDATAPATH = 'data/'DATAPATH = 'ip/'OUTPUTPATH = 'generated_output/'OUTPUTPATH = 'op/'INPUT_XML = 'output.xml'INPUT_XML = 'output.xml' minimum height of a row in pixels, measured in the scanned pages very important. the minimum width of a column in pixels, measured in the scanned pages%% Some helper functions%% Read the XML Load the XML that was generated with pdftohtml parse it and generate a dict of pages%% Split the scanned double pages so that we can later process the lists page-by-page list of tuples with (double page, split text boxes, split images) get the image file of the scanned page create an image processing object with the scanned page calculate the scaling of the image file in relation to the text boxes coordinate system dimensions scaling in X-direction scaling in Y-direction detect the lines in the double pages find the vertical line that separates both sides split the scanned double page at the separator line split the textboxes at the separator line generate a new XML and "pages" dict structure from the split pages we don't need the original double pages any more, we'll work with 'split_pages'%% Detect clusters of horizontal lines using the image processing module and rotate back or deskew pages scaling of the scanned page image in relation to the OCR page dimensions for each page get the image file of the scanned page create an image processing object with the scanned page calculate the scaling of the image file in relation to the text boxes coordinate system dimensions scaling in X-direction scaling in Y-direction detect the lines find rotation or skew the parameters are: 1. the minimum threshold in radians for a rotation to be counted as such 2. the maximum threshold for the difference between horizontal and vertical line rotation (to detect skew) 3. 
an optional threshold to filter out "stray" lines whose angle is too far apart from the median angle of all other lines that go in the same direction (no effect here) uses "lines_hough" rotate back text boxes since often no vertical lines can be detected and hence it cannot be determined if the page is rotated or skewed, we assume that it's always rotated rotate back detected lines cluster the detected *horizontal* lines using find_clusters_1d_break_dist as simple clustering function (break on distance MIN_ROW_HEIGHT/2) additionally, remove all cluster sections that are considered empty a cluster is considered empty when the number of text boxes in it is below 10% of the median number of text boxes per cluster section use this page's textboxes 10% rule the positions are in "scanned image space" -> we scale them to "text box space" draw the clusters save split and repaired XML (i.e. XML with deskewed textbox positions)%% Determine the rows and columns of the tables a valid school number indicates a table row try to find out the table rows in this page using the horizontal lines that were detected before last line: page bottom is True when the current segment is a real table row (not a table header or surrounding text) get all texts in this row skip empty rows try to find the start and the end of the table if this matches, we found the start of the table we found the end of the table this is a table row, so add the texts and row positions to the respective lists try to find out the table columns in this page using the distribution of x-coordinates of the left position of each text box in all rows make clusters of x positions remove falsely identified columns (i.e. 
merge columns with only a few text boxes) append the rightmost text's right border as the last column border merge columns with few text boxes there should be at least one text box per row manually add border for the last column because it has very few or no text boxes create the grid this happens for the first page as there's no table on that save the page grids After you created the page grids, you should then check that they're correct using pdf2xml-viewer's loadGridFile() function%% Create data frames (requires pandas library) For sake of simplicity, we will just fit the text boxes into the grid, merge the texts in their cells (splitting text boxes to separate lines if necessary) and output the result. Normally, you would do some more parsing here, e.g. extracting the address components from the second column. happens when no table was detected | 4,393 | en | 0.845568 |
# coding=utf-8
import sys
import argparse
import os
from tensorflow.python.platform import gfile
import numpy as np
import tensorflow as tf
from tensorflow.python.layers.core import Dense
from utils.data_manager import load_data, load_data_one
from collections import defaultdict
from argparse import ArgumentParser
from decode_helper import decode_one
import sys
reload(sys)
sys.setdefaultencoding('utf8')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tf_helper import train, evaluate, decode_data, decode_data_recover
from model1 import construct_graph
def init_args():
    """Build and parse the command-line arguments for train/infer/transfer/txt runs."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '--data_path',
        default=os.path.dirname(os.path.abspath(__file__)) + '/data',
        type=str,
        help='Data path.')
    # NOTE(review): type=bool is an argparse pitfall — bool('False') is True,
    # so any non-empty value passed on the CLI enables the flag.
    arg_parser.add_argument(
        '--load_data', default=False, type=bool, help='Load data.')
    arg_parser.add_argument(
        '--data',
        choices=['wikisql', 'spider', 'overnight', 'overnight_set'],
        default='wikisql',
        help='data to train & test')
    #arg_parser.add_argument('--tran_data', choices=['wikisql', 'spider', 'overnight'], default='overnight', help='data to transfer')
    arg_parser.add_argument(
        '--subset', choices=['all'], default='all', help='Subset of data.')
    arg_parser.add_argument(
        '--maxlen', default=60, type=int, help='Data record max length.')
    arg_parser.add_argument(
        '--annotation_path',
        default=os.path.dirname(os.path.abspath(__file__)) +
        '/data/DATA/wiki/',
        type=str,
        help='Data annotation path.')
    arg_parser.add_argument(
        '--mode',
        choices=['train', 'infer', 'transfer','txt'],
        default='infer',
        help='Run mode')
    #### Model configuration ####
    arg_parser.add_argument(
        '--cell',
        choices=['gru'],
        default='gru',
        help='Type of cell used, currently only standard GRU cell is supported'
    )
    arg_parser.add_argument(
        '--output_vocab_size',
        default=20637,
        #default=20452,
        type=int,
        help='Output vocabulary size.')
    # Embedding sizes
    arg_parser.add_argument(
        '--embedding_dim',
        default=300,
        type=int,
        help='Size of word embeddings')
    # Hidden sizes
    arg_parser.add_argument(
        '--dim', default=400, type=int, help='Size of GRU hidden states')
    arg_parser.add_argument(
        '--hidden_size',
        default=256,
        type=int,
        help='Size of LSTM hidden states')
    # NOTE(review): default=False is redundant with action='store_true'.
    arg_parser.add_argument(
        '--no_copy',
        default=False,
        action='store_true',
        help='Do not use copy mechanism')
    #### Training ####
    arg_parser.add_argument(
        '--vocab', type=str, help='Path of the serialized vocabulary')
    arg_parser.add_argument(
        '--glove_embed_path',
        default=None,
        type=str,
        help='Path to pretrained Glove mebedding')
    arg_parser.add_argument(
        '--batch_size', default=128, type=int, help='Batch size')
    arg_parser.add_argument(
        '--in_drop', default=0., type=float, help='In dropout rate')
    arg_parser.add_argument(
        '--out_drop', default=0., type=float, help='Out dropout rate')
    # training details
    arg_parser.add_argument(
        '--valid_epoch_interval',
        default=1,
        type=int,
        help='Perform validation every x epoch')
    arg_parser.add_argument(
        '--clip_grad', default=5., type=float, help='Clip gradients')
    arg_parser.add_argument(
        '--total_epochs', default=40, type=int, help='# of training epoches')
    arg_parser.add_argument(
        '--epochs', default=1, type=int, help='Record per x epoches')
    arg_parser.add_argument(
        '--lr', default=0.0001, type=float, help='Learning rate')
    arg_parser.add_argument(
        '--lr_decay',
        default=0.5,
        type=float,
        help='decay learning rate if the validation performance drops')
    #### decoding/validation/testing ####
    arg_parser.add_argument(
        '--load_model', default=False, type=bool, help='Whether to load model')
    arg_parser.add_argument(
        '--beam_width', default=5, type=int, help='Beam size for beam search')
    arg_parser.add_argument(
        '--decode_max_time_step',
        default=100,
        type=int,
        help='Maximum number of time steps used '
        'in decoding and sampling')
    args = arg_parser.parse_args()
    return args
def model(args, train_env, infer_env):
    """Build separate training and inference graphs.

    Tensors and ops are attached as attributes to the supplied environment
    namespaces (`train_env`, `infer_env`); the two graphs are returned so the
    caller can open one session per graph.
    """
    tf.reset_default_graph()
    g_train = tf.Graph()
    g_infer = tf.Graph()

    with g_train.as_default():
        train_env.x = tf.placeholder(
            tf.int32, shape=[None, args.maxlen], name='x')
        train_env.y = tf.placeholder(tf.int32, (None, args.maxlen), name='y')
        train_env.training = tf.placeholder_with_default(
            False, (), name='train_mode')
        (train_env.train_op, train_env.loss, train_env.acc,
         _sample_ids, _logits) = construct_graph("train", train_env, args)
        train_env.saver = tf.train.Saver()

    with g_infer.as_default():
        infer_env.x = tf.placeholder(
            tf.int32, shape=[None, args.maxlen], name='x')
        infer_env.y = tf.placeholder(tf.int32, (None, args.maxlen), name='y')
        infer_env.training = tf.placeholder_with_default(
            False, (), name='train_mode')
        _, infer_env.loss, infer_env.acc, infer_env.pred_ids, _ = construct_graph(
            "infer", infer_env, args)
        infer_env.infer_saver = tf.train.Saver()

    return g_train, g_infer
def inferrence(args):
    """Restore the saved checkpoint and report EM accuracy on dev and test."""
    args.load_model = True

    class Dummy:
        pass

    train_env = Dummy()
    infer_env = Dummy()
    _, infer_graph = model(args, train_env, infer_env)

    args.data = 'wikisql'
    args.load_data = True
    X_train, y_train = load_data(maxlen=args.maxlen, load=args.load_data, s='train')
    X_test, y_test = load_data(maxlen=args.maxlen, load=args.load_data, s='test')
    X_dev, y_dev = load_data(maxlen=args.maxlen, load=args.load_data, s='dev')

    sess = tf.InteractiveSession(graph=infer_graph)
    infer_env.infer_saver.restore(sess, 'model/{}'.format(args.subset))

    print('===========dev set============')
    decode_data(sess, infer_env, X_dev, y_dev)
    decode_data_recover(sess, infer_env, X_dev, y_dev, 'dev')
    print('==========test set===========')
    decode_data(sess, infer_env, X_test, y_test)
    decode_data_recover(sess, infer_env, X_test, y_test, 'test')
def infer_one(args):
    """Restore the saved checkpoint and decode the questions listed in qs.txt."""
    args.load_model = True

    class Dummy:
        pass

    train_env = Dummy()
    infer_env = Dummy()
    _, infer_graph = model(args, train_env, infer_env)

    args.data = 'wikisql'
    args.load_data = True

    sess = tf.InteractiveSession(graph=infer_graph)
    infer_env.infer_saver.restore(sess, 'model/{}'.format(args.subset))

    print('===========decode============')
    questions = load_data_one(args.maxlen, 'qs.txt')
    decode_one(sess, infer_env, questions)
def train_model(args):
    """Train the model, periodically evaluating exact-match (EM) accuracy.

    Runs ``args.total_epochs`` epochs in chunks of ``args.epochs``; after each
    chunk the latest checkpoint is restored into the inference graph, EM is
    measured on dev and test, and the weights are snapshotted to ``best_model/``
    whenever the dev EM improves.
    """
    class Dummy:
        pass

    train_env = Dummy()
    infer_env = Dummy()
    train_graph, infer_graph = model(args, train_env, infer_env)

    args.data = 'wikisql'
    args.load_data = True
    args.load_model = False
    X_train, y_train = load_data(maxlen=args.maxlen, load=args.load_data, s='train')
    X_test, y_test = load_data(maxlen=args.maxlen, load=args.load_data, s='test')
    X_dev, y_dev = load_data(maxlen=args.maxlen, load=args.load_data, s='dev')

    model2load = 'model/{}'.format(args.subset)
    max_em, global_test_em, best_base = -1, -1, -1

    sess1 = tf.InteractiveSession(graph=train_graph)
    sess1.run(tf.global_variables_initializer())
    sess1.run(tf.local_variables_initializer())
    sess2 = tf.InteractiveSession(graph=infer_graph)
    sess2.run(tf.global_variables_initializer())
    # Mirror sess1: also initialize *local* variables (the second global-init
    # call here previously was a copy/paste slip).
    sess2.run(tf.local_variables_initializer())

    # Floor division keeps this an integer iteration count under Python 3 too.
    for base in range(args.total_epochs // args.epochs):
        print('\nIteration: %d (%d epochs)' % (base, args.epochs))
        model2load = train(
            sess1,
            train_env,
            X_train,
            y_train,
            epochs=args.epochs,
            load=args.load_model,
            name=args.subset,
            batch_size=args.batch_size,
            base=base,
            model2Bload=model2load)
        # From the second chunk on, the trainer resumes from the checkpoint.
        args.load_model = True
        infer_env.infer_saver.restore(sess2, model2load)
        print('===========dev set============')
        dev_em = decode_data(sess2, infer_env, X_dev, y_dev)
        # The recovered (post-processed) EM is the number used for model selection.
        dev_em = decode_data_recover(sess2, infer_env, X_dev, y_dev,
                                     'dev')
        print('==========test set===========')
        test_em = decode_data(sess2, infer_env, X_test, y_test)
        test_em = decode_data_recover(sess2, infer_env, X_test, y_test,
                                      'test')
        if dev_em > max_em:
            max_em = dev_em
            global_test_em = test_em
            best_base = base
            print('\n Saving model for best testing')
            train_env.saver.save(sess1, 'best_model/{0}-{1}-{2:.2f}'.format(args.subset, base, max_em))
    print('Max EM acc: %.4f during %d iteration.' % (max_em, best_base))
    print('test EM acc: %.4f ' % global_test_em)
def transfer(args):
    """Evaluate the trained wikisql model on the OVERNIGHT transfer sets.

    Decodes each OVERNIGHT domain subset separately (writing per-domain output
    files) and then the combined transfer set.
    """
    class Dummy:
        pass

    train_env = Dummy()
    infer_env = Dummy()
    _, infer_graph = model(args, train_env, infer_env)

    args.data = 'overnight'
    args.load_data = True
    X_tran, y_tran = load_data(maxlen=args.maxlen, load=args.load_data, s='overnight')
    args.data = 'overnight_set'
    tran_sets = load_data(maxlen=args.maxlen, load=args.load_data, s='overnight_set')

    model2load = 'model/{}'.format(args.subset)
    sess = tf.InteractiveSession(graph=infer_graph)
    infer_env.infer_saver.restore(sess, model2load)

    print('========subset transfer set========')
    subsets = ['basketball', 'calendar', 'housing', 'recipes', 'restaurants']
    for subset, (X_tran_subset, y_tran_subset) in zip(subsets, tran_sets):
        print('---------' + subset + '---------')
        decode_data(
            sess,
            infer_env,
            X_tran_subset,
            y_tran_subset,
            filename=str(subset + '.txt'))
    print('===========transfer set============')
    decode_data(sess, infer_env, X_tran, y_tran)
if __name__ == '__main__':
    args = init_args()
    print(args)
    # Dispatch on the run mode; argparse 'choices' guarantees args.mode is
    # one of these four keys.
    mode_table = {
        'train': ('\nTrain model.', train_model),
        'infer': ('\nInference.', inferrence),
        'txt': ('\nInference from txt.', infer_one),
        'transfer': ('\nTransfer.', transfer),
    }
    banner, handler = mode_table[args.mode]
    print(banner)
    handler(args)
| main.py | 11,385 | coding=utf-8arg_parser.add_argument('--tran_data', choices=['wikisql', 'spider', 'overnight'], default='overnight', help='data to transfer') Model configuration default=20452, Embedding sizesHidden sizes Training training details decoding/validation/testing [print(n.name) for n in tf.get_default_graph().as_graph_def().node if 'xxxxx' in n.name]X_train, y_train, X_test, y_test, X_dev, y_dev = load_data(args)X_train, y_train, X_test, y_test, X_dev, y_dev = load_data(args)X_tran, y_tran = load_data(args)tran_sets = load_data(args) | 534 | en | 0.362491 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.utils import timezone
class Migration(migrations.Migration):
    """Add participation flag, update timestamp and version to FederalSite."""

    dependencies = [
        ('nationalparks', '0009_auto_20150831_1721'),
    ]

    operations = [
        migrations.AddField(
            model_name='federalsite',
            name='active_participant',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='federalsite',
            name='update_timestamp',
            # Pass the callable, not timezone.now(): calling it would freeze the
            # timestamp at the moment this module is *imported* and bake that
            # value into the migration. The callable is evaluated when the
            # migration runs.
            field=models.DateTimeField(auto_now=True, default=timezone.now),
            # One-off default used only to backfill existing rows.
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='federalsite',
            name='version',
            field=models.PositiveIntegerField(default=0),
            preserve_default=False,
        ),
    ]
| ekip/nationalparks/migrations/0010_auto_20150902_1902.py | 888 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fake_switches.command_processing.command_processor import CommandProcessor
class BaseCommandProcessor(CommandProcessor):
    """Base class for prompt-driven command processors.

    Implements the mechanics shared by all concrete processors: output piping
    (" | "), delegation to a nested sub-processor, multi-line command
    continuations, and single-keystroke prompts.
    """

    def __init__(self, switch_configuration, terminal_controller, logger, piping_processor):
        """
        :type switch_configuration: fake_switches.switch_configuration.SwitchConfiguration
        :type terminal_controller: fake_switches.terminal.TerminalController
        :type logger: logging.Logger
        :type piping_processor: fake_switches.command_processing.piping_processor_base.PipingProcessorBase
        """
        self.switch_configuration = switch_configuration
        self.terminal_controller = terminal_controller
        self.logger = logger
        self.piping_processor = piping_processor
        self.sub_processor = None        # nested processor currently owning the input, if any
        self.continuing_to = None        # callback that will consume the next line, if any
        self.is_done = False             # True once this processor should be discarded
        self.replace_input = False
        self.awaiting_keystroke = False  # True while waiting for a single keypress

    def process_command(self, line):
        # Peel off an optional piping suffix (e.g. "show run | include foo")
        # and activate the piping processor before executing the command itself.
        if " | " in line:
            line, piping_command = line.split(" | ", 1)
            piping_started = self.activate_piping(piping_command)
            if not piping_started:
                return False
        processed = False
        if self.sub_processor:
            processed = self.delegate_to_sub_processor(line)
        if not processed:
            if self.continuing_to:
                processed = self.continue_command(line)
            else:
                processed = self.parse_and_execute_command(line)
        # Only flush piping and re-show the prompt when the command completed
        # here (no pending continuation/keystroke, not done, not delegated).
        if not self.continuing_to and not self.awaiting_keystroke and not self.is_done and processed and not self.sub_processor:
            self.finish_piping()
            self.show_prompt()
        return processed

    def parse_and_execute_command(self, line):
        if line.strip():
            func, args = self.get_command_func(line)
            if not func:
                # Not understood at this level; let the parent processor try.
                self.logger.debug("%s can't process : %s, falling back to parent" % (self.__class__.__name__, line))
                return False
            else:
                func(*args)
        return True

    def continue_command(self, line):
        # Clear the continuation *before* invoking it so the callback may
        # install a new continuation of its own.
        func = self.continuing_to
        self.continue_to(None)
        func(line)
        return True

    def delegate_to_sub_processor(self, line):
        processed = self.sub_processor.process_command(line)
        if self.sub_processor.is_done:
            self.sub_processor = None
            self.show_prompt()
        return processed

    def move_to(self, new_process_class, *args):
        # Enter a nested mode (e.g. a config sub-menu) handled by another processor.
        self.sub_processor = new_process_class(self.switch_configuration, self.terminal_controller, self.logger, self.piping_processor, *args)
        self.sub_processor.show_prompt()

    def continue_to(self, continuing_action):
        self.continuing_to = continuing_action

    def get_continue_command_func(self, cmd):
        # Continuations are conventionally named continue_<cmd> on the subclass.
        return getattr(self, 'continue_' + cmd, None)

    def write(self, data):
        # Route all output through the piping processor, which may transform
        # the text or suppress it entirely (returns False to drop).
        filtered = self.pipe(data)
        if filtered is not False:
            self.terminal_controller.write(filtered)

    def write_line(self, data):
        self.write(data + "\n")

    def show_prompt(self):
        if self.sub_processor is not None:
            self.sub_processor.show_prompt()
        else:
            self.write(self.get_prompt())

    def get_prompt(self):
        # Overridden by concrete processors; the base class shows no prompt.
        pass

    def activate_piping(self, piping_command):
        return self.piping_processor.start_listening(piping_command)

    def pipe(self, data):
        if self.piping_processor.is_listening():
            return self.piping_processor.pipe(data)
        else:
            return data

    def finish_piping(self):
        if self.piping_processor.is_listening():
            self.piping_processor.stop_listening()

    def on_keystroke(self, callback, *args):
        # Arrange for callback(*args, key) to be invoked on the next keypress.
        def on_keystroke_handler(key):
            self.awaiting_keystroke = False
            self.terminal_controller.remove_any_key_handler()
            callback(*(args + (key,)))
        self.terminal_controller.add_any_key_handler(on_keystroke_handler)
        self.awaiting_keystroke = True
| fake_switches/command_processing/base_command_processor.py | 4,660 | :type switch_configuration: fake_switches.switch_configuration.SwitchConfiguration
:type terminal_controller: fake_switches.terminal.TerminalController
:type logger: logging.Logger
:type piping_processor: fake_switches.command_processing.piping_processor_base.PipingProcessorBase
Copyright 2015 Internap. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 828 | en | 0.759456 |
from __future__ import division
import sys
class Inf(float):
    # Carry module-like attributes so the instance can stand in for this
    # module after the sys.modules swap at the bottom of the file.
    __name__ = __name__
    __file__ = __file__

    @staticmethod
    def div(p, q):
        """
        ``p / q`` returning the correct infinity instead of
        raising ZeroDivisionError.
        """
        from math import copysign
        if q != 0.0:
            # Finite, nonzero divisor: plain division.
            return p / q
        if p == 0.0:
            # 0/0 has no sensible value; let float division raise.
            return p / q
        # q is a signed zero: the resulting infinity carries the sign of p,
        # flipped when q is -0.0.
        signed = p if copysign(1, q) > 0 else -p
        return copysign(inf, signed)
sys.modules[__name__] = inf = Inf("+inf")
| inf.py | 757 | ``p / q`` returning the correct infinity instead of
raising ZeroDivisionError.
Normal case, no infinities. Doesn't return, raises an Exception. q is +0.0, return inf with same sign as p. q is -0.0, return inf with flipped sign. | 229 | en | 0.875042 |
import os
import numpy as np
from shapely.geometry import box, Polygon
import geopandas as gpd
from ..utils.core import _check_gdf_load, _check_crs
from ..utils.tile import save_empty_geojson
from ..utils.geo import gdf_get_projection_unit, split_multi_geometries
from ..utils.geo import reproject_geometry
from tqdm import tqdm
class VectorTiler(object):
    """An object to tile geospatial vector data into smaller pieces.

    Arguments
    ---------
    dest_dir : str, optional
        Directory to which output tiles are written (created if absent).
    dest_crs : int or CRS object, optional
        CRS for the output tiles. Defaults to the CRS of the source data.
    output_format : str, optional (default: ``'GeoJSON'``)
        Driver name used when writing tiles to disk.
    verbose : bool, optional (default: ``False``)
        Print coarse progress information.
    super_verbose : bool, optional (default: ``False``)
        Print per-tile progress information.

    Attributes
    ----------
    tile_paths : list
        Paths of the files written by the most recent call to :meth:`tile`.
    """
    def __init__(self, dest_dir=None, dest_crs=None, output_format='GeoJSON',
                 verbose=False, super_verbose=False):
        if verbose or super_verbose:
            print('Preparing the tiler...')
        # NOTE(review): despite the default, dest_dir=None would crash in
        # os.path.isdir below — dest_dir appears effectively required; confirm.
        self.dest_dir = dest_dir
        if not os.path.isdir(self.dest_dir):
            os.makedirs(self.dest_dir)
        if dest_crs is not None:
            self.dest_crs = _check_crs(dest_crs)
        self.output_format = output_format
        self.verbose = verbose
        self.super_verbose = super_verbose
        self.tile_paths = []  # retains the paths of the last call to .tile()
        if self.verbose or self.super_verbose:
            print('Initialization done.')
    def tile(self, src, tile_bounds, tile_bounds_crs=None, geom_type='Polygon',
             split_multi_geoms=True, min_partial_perc=0.0,
             dest_fname_base='geoms', obj_id_col=None,
             output_ext='.geojson'):
        """Tile `src` into vector data tiles bounded by `tile_bounds`.

        Arguments
        ---------
        src : `str` or :class:`geopandas.GeoDataFrame`
            The source vector data to tile. Must either be a path to a GeoJSON
            or a :class:`geopandas.GeoDataFrame`.
        tile_bounds : list
            A :class:`list` made up of ``[left, top, right, bottom] `` sublists
            (this can be extracted from
            :class:`solaris.tile.raster_tile.RasterTiler` after tiling imagery)
        tile_bounds_crs : int, optional
            The EPSG code or rasterio.crs.CRS object for the CRS that the tile
            bounds are in. RasterTiler.tile returns the CRS of the raster tiles
            and can be used here. If not provided, it's assumed that the CRS is the
            same as in `src`. This argument must be provided if the bound
            coordinates and `src` are not in the same CRS, otherwise tiling will
            not occur correctly.
        geom_type : str, optional (default: "Polygon")
            The type of geometries contained within `src`. Defaults to
            ``"Polygon"``, can also be ``"LineString"``.
        split_multi_geoms : bool, optional (default: True)
            Should multi-polygons or multi-linestrings generated by clipping
            a geometry into discontinuous pieces be separated? Defaults to yes
            (``True``).
        min_partial_perc : float, optional (default: 0.0)
            The minimum percentage of a :class:`shapely.geometry.Polygon` 's
            area or :class:`shapely.geometry.LineString` 's length that must
            be retained within a tile's bounds to be included in the output.
            Defaults to ``0.0``, meaning that the contained portion of a
            clipped geometry will be included, no matter how small.
        dest_fname_base : str, optional (default: 'geoms')
            The base filename to use when creating outputs. The lower left
            corner coordinates of the tile's bounding box will be appended
            when saving.
        obj_id_col : str, optional (default: None)
            If ``split_multi_geoms=True``, the name of a column that specifies
            a unique identifier for each geometry (e.g. the ``"BuildingId"``
            column in many SpaceNet datasets.) See
            :func:`solaris.utils.geo.split_multi_geometries` for more.
        output_ext : str, optional, (default: geojson)
            Extension of output files, can be 'geojson' or 'json'.
        """
        tile_gen = self.tile_generator(src, tile_bounds, tile_bounds_crs,
                                       geom_type, split_multi_geoms,
                                       min_partial_perc,
                                       obj_id_col=obj_id_col)
        self.tile_paths = []
        for tile_gdf, tb in tqdm(tile_gen):
            # NOTE: self.proj_unit is assigned inside tile_generator() before
            # the first yield, so it is available here (ordering-sensitive).
            if self.proj_unit not in ['meter', 'metre']:
                # Angular units (e.g. degrees): keep 3 decimals so filenames
                # of adjacent small tiles stay distinct.
                dest_path = os.path.join(
                    self.dest_dir, '{}_{}_{}{}'.format(dest_fname_base,
                                                       np.round(tb[0], 3),
                                                       np.round(tb[3], 3),
                                                       output_ext))
            else:
                dest_path = os.path.join(
                    self.dest_dir, '{}_{}_{}{}'.format(dest_fname_base,
                                                       int(tb[0]),
                                                       int(tb[3]),
                                                       output_ext))
            self.tile_paths.append(dest_path)
            if len(tile_gdf) > 0:
                tile_gdf.to_file(dest_path, driver='GeoJSON')
            else:
                # Still emit a (valid, empty) file so every tile bound has a
                # matching vector file on disk.
                save_empty_geojson(dest_path, self.dest_crs)
    def tile_generator(self, src, tile_bounds, tile_bounds_crs=None,
                       geom_type='Polygon', split_multi_geoms=True,
                       min_partial_perc=0.0, obj_id_col=None):
        """Generate `src` vector data tiles bounded by `tile_bounds`.

        Arguments
        ---------
        src : `str` or :class:`geopandas.GeoDataFrame`
            The source vector data to tile. Must either be a path to a GeoJSON
            or a :class:`geopandas.GeoDataFrame`.
        tile_bounds : list
            A :class:`list` made up of ``[left, top, right, bottom] `` sublists
            (this can be extracted from
            :class:`solaris.tile.raster_tile.RasterTiler` after tiling imagery)
        tile_bounds_crs : int, optional
            The EPSG code for the CRS that the tile bounds are in. If not
            provided, it's assumed that the CRS is the same as in `src`. This
            argument must be provided if the bound coordinates and `src` are
            not in the same CRS, otherwise tiling will not occur correctly.
        geom_type : str, optional (default: "Polygon")
            The type of geometries contained within `src`. Defaults to
            ``"Polygon"``, can also be ``"LineString"``.
        split_multi_geoms : bool, optional (default: True)
            Should multi-polygons or multi-linestrings generated by clipping
            a geometry into discontinuous pieces be separated? Defaults to yes
            (``True``).
        min_partial_perc : float, optional (default: 0.0)
            The minimum percentage of a :class:`shapely.geometry.Polygon` 's
            area or :class:`shapely.geometry.LineString` 's length that must
            be retained within a tile's bounds to be included in the output.
            Defaults to ``0.0``, meaning that the contained portion of a
            clipped geometry will be included, no matter how small.
        obj_id_col : str, optional (default: None)
            If ``split_multi_geoms=True``, the name of a column that specifies
            a unique identifier for each geometry (e.g. the ``"BuildingId"``
            column in many SpaceNet datasets.) See
            :func:`solaris.utils.geo.split_multi_geometries` for more.

        Yields
        ------
        tile_gdf : :class:`geopandas.GeoDataFrame`
            A tile geodataframe.
        tb : list
            A list with ``[left, top, right, bottom] `` coordinates for the
            boundaries contained by `tile_gdf`.
        """
        self.src = _check_gdf_load(src)
        if self.verbose:
            print("Num tiles:", len(tile_bounds))
        self.src_crs = _check_crs(self.src.crs)
        # check if the tile bounds and vector are in the same crs
        if tile_bounds_crs is not None:
            tile_bounds_crs = _check_crs(tile_bounds_crs)
        else:
            tile_bounds_crs = self.src_crs
        if self.src_crs != tile_bounds_crs:
            reproject_bounds = True  # used to transform tb for clip_gdf()
        else:
            reproject_bounds = False
        # Used by tile() to decide whether tile filenames get rounded floats
        # (angular units) or ints (linear units).
        self.proj_unit = self.src_crs.linear_units
        if getattr(self, 'dest_crs', None) is None:
            self.dest_crs = self.src_crs
        for i, tb in enumerate(tile_bounds):
            if self.super_verbose:
                print("\n", i, "/", len(tile_bounds))
            if reproject_bounds:
                # Bounds are in another CRS: reproject each tile box into the
                # source CRS before clipping.
                tile_gdf = clip_gdf(self.src,
                                    reproject_geometry(box(*tb),
                                                       tile_bounds_crs,
                                                       self.src_crs),
                                    min_partial_perc,
                                    geom_type, verbose=self.super_verbose)
            else:
                tile_gdf = clip_gdf(self.src, tb, min_partial_perc, geom_type,
                                    verbose=self.super_verbose)
            if self.src_crs != self.dest_crs:
                tile_gdf = tile_gdf.to_crs(crs=self.dest_crs.to_wkt())
            if split_multi_geoms:
                # Operates on tile_gdf in place.
                split_multi_geometries(tile_gdf, obj_id_col=obj_id_col)
            yield tile_gdf, tb
def search_gdf_polygon(gdf, tile_polygon):
    """Return the subset of ``gdf`` whose geometries intersect ``tile_polygon``.

    Uses the GeoDataFrame's spatial index for a fast bounding-box pre-filter,
    then confirms each candidate with an exact intersection test. If nothing
    overlaps, an empty :py:class:`geopandas.GeoDataFrame` is returned.
    """
    spatial_index = gdf.sindex
    candidate_idx = list(spatial_index.intersection(tile_polygon.bounds))
    candidates = gdf.iloc[candidate_idx]
    matches = candidates[candidates.intersects(tile_polygon)]
    if matches.empty:
        matches = gpd.GeoDataFrame(geometry=[])
    return matches
def clip_gdf(gdf, tile_bounds, min_partial_perc=0.0, geom_type="Polygon",
             use_sindex=True, verbose=False):
    """Clip GDF to a provided polygon.

    Clips objects within `gdf` to the region defined by
    `poly_to_cut`. Also adds several columns to the output::

        `origarea`
            The original area of the polygons (only used if `geom_type` ==
            ``"Polygon"``).
        `origlen`
            The original length of the objects (only used if `geom_type` ==
            ``"LineString"``).
        `partialDec`
            The fraction of the object that remains after clipping
            (fraction of area for Polygons, fraction of length for
            LineStrings.) Can filter based on this by using `min_partial_perc`.
        `truncated`
            Boolean indicator of whether or not an object was clipped.

    Arguments
    ---------
    gdf : :py:class:`geopandas.GeoDataFrame`
        A :py:class:`geopandas.GeoDataFrame` of polygons to clip.
    tile_bounds : `list`, `tuple`, or :class:`shapely.geometry.Polygon`
        The geometry to clip objects in `gdf` to. This can either be a
        ``[left, top, right, bottom] `` bounds list or a
        :class:`shapely.geometry.Polygon` object defining the area to keep.
    min_partial_perc : float, optional
        The minimum fraction of an object in `gdf` that must be
        preserved. Defaults to 0.0 (include any object if any part remains
        following clipping).
    geom_type : str, optional
        Type of objects in `gdf`. Can be one of
        ``["Polygon", "LineString"]`` . Defaults to ``"Polygon"`` .
    use_sindex : bool, optional
        Use the `gdf` sindex be used for searching. Improves efficiency
        but requires `libspatialindex <http://libspatialindex.github.io/>`__ .
    verbose : bool, optional
        Switch to print relevant values.

    Returns
    -------
    cut_gdf : :py:class:`geopandas.GeoDataFrame`
        `gdf` with all contained objects clipped to `poly_to_cut` .
        See notes above for details on additional clipping columns added.

    Raises
    ------
    TypeError
        If `tile_bounds` is not a tuple, list, or Polygon.
    """
    if isinstance(tile_bounds, (tuple, list)):
        tb = box(*tile_bounds)
    elif isinstance(tile_bounds, Polygon):
        tb = tile_bounds
    else:
        # Previously an unsupported type fell through and raised a confusing
        # NameError on `tb`; fail fast with a clear message instead.
        raise TypeError('tile_bounds must be a tuple, list, or Polygon, '
                        'got {}'.format(type(tile_bounds)))
    if use_sindex and (geom_type == "Polygon"):
        # Spatial-index pre-filter so we only clip candidate geometries.
        gdf = search_gdf_polygon(gdf, tb)
    if 'origarea' not in gdf.columns:
        # BUGFIX: this previously compared the literal string "geom_type" to
        # "LineString" (always False), so LineStrings got an area column too.
        if geom_type == "LineString":
            gdf['origarea'] = 0
        else:
            gdf['origarea'] = gdf.area
    if 'origlen' not in gdf.columns:
        # BUGFIX: same literal-string comparison; origlen was always 0.
        if geom_type == "LineString":
            gdf['origlen'] = gdf.length
        else:
            gdf['origlen'] = 0
    # TODO must implement different case for lines and for spatialIndex
    # (Assume RTree is already performed)
    cut_gdf = gdf.copy()
    cut_gdf.geometry = gdf.intersection(tb)
    if geom_type == 'Polygon':
        cut_gdf['partialDec'] = cut_gdf.area / cut_gdf['origarea']
        cut_gdf = cut_gdf.loc[cut_gdf['partialDec'] > min_partial_perc, :]
        cut_gdf['truncated'] = (cut_gdf['partialDec'] != 1.0).astype(int)
    else:
        # Assume LineStrings: drop empty intersections; partialDec/truncated
        # are placeholders until length-based truncation is implemented.
        cut_gdf = cut_gdf[cut_gdf['geometry'].notnull()]
        cut_gdf['partialDec'] = 1
        cut_gdf['truncated'] = 0
        # TODO: IMPLEMENT TRUNCATION MEASUREMENT FOR LINESTRINGS
    if len(cut_gdf) > 0 and verbose:
        print("clip_gdf() - gdf.iloc[0]:", gdf.iloc[0])
        print("clip_gdf() - tb:", tb)
        print("clip_gdf() - gdf_cut:", cut_gdf)
    return cut_gdf
| 3-SatShipAI/solaris/tile/vector_tile.py | 14,194 | An object to tile geospatial vector data into smaller pieces.
Arguments
---------
Attributes
----------
Clip GDF to a provided polygon.
Clips objects within `gdf` to the region defined by
`poly_to_cut`. Also adds several columns to the output::
`origarea`
The original area of the polygons (only used if `geom_type` ==
``"Polygon"``).
`origlen`
The original length of the objects (only used if `geom_type` ==
``"LineString"``).
`partialDec`
The fraction of the object that remains after clipping
(fraction of area for Polygons, fraction of length for
LineStrings.) Can filter based on this by using `min_partial_perc`.
`truncated`
Boolean indicator of whether or not an object was clipped.
Arguments
---------
gdf : :py:class:`geopandas.GeoDataFrame`
A :py:class:`geopandas.GeoDataFrame` of polygons to clip.
tile_bounds : `list` or :class:`shapely.geometry.Polygon`
The geometry to clip objects in `gdf` to. This can either be a
``[left, top, right, bottom] `` bounds list or a
:class:`shapely.geometry.Polygon` object defining the area to keep.
min_partial_perc : float, optional
The minimum fraction of an object in `gdf` that must be
preserved. Defaults to 0.0 (include any object if any part remains
following clipping).
geom_type : str, optional
Type of objects in `gdf`. Can be one of
``["Polygon", "LineString"]`` . Defaults to ``"Polygon"`` .
use_sindex : bool, optional
Use the `gdf` sindex be used for searching. Improves efficiency
but requires `libspatialindex <http://libspatialindex.github.io/>`__ .
verbose : bool, optional
Switch to print relevant values.
Returns
-------
cut_gdf : :py:class:`geopandas.GeoDataFrame`
`gdf` with all contained objects clipped to `poly_to_cut` .
See notes above for details on additional clipping columns added.
Find polygons in a GeoDataFrame that overlap with `tile_polygon` .
Arguments
---------
gdf : :py:class:`geopandas.GeoDataFrame`
A :py:class:`geopandas.GeoDataFrame` of polygons to search.
tile_polygon : :py:class:`shapely.geometry.Polygon`
A :py:class:`shapely.geometry.Polygon` denoting a tile's bounds.
Returns
-------
precise_matches : :py:class:`geopandas.GeoDataFrame`
The subset of `gdf` that overlaps with `tile_polygon` . If
there are no overlaps, this will return an empty
:py:class:`geopandas.GeoDataFrame`.
Tile `src` into vector data tiles bounded by `tile_bounds`.
Arguments
---------
src : `str` or :class:`geopandas.GeoDataFrame`
The source vector data to tile. Must either be a path to a GeoJSON
or a :class:`geopandas.GeoDataFrame`.
tile_bounds : list
A :class:`list` made up of ``[left, top, right, bottom] `` sublists
(this can be extracted from
:class:`solaris.tile.raster_tile.RasterTiler` after tiling imagery)
tile_bounds_crs : int, optional
The EPSG code or rasterio.crs.CRS object for the CRS that the tile
bounds are in. RasterTiler.tile returns the CRS of the raster tiles
and can be used here. If not provided, it's assumed that the CRS is the
same as in `src`. This argument must be provided if the bound
coordinates and `src` are not in the same CRS, otherwise tiling will
not occur correctly.
geom_type : str, optional (default: "Polygon")
The type of geometries contained within `src`. Defaults to
``"Polygon"``, can also be ``"LineString"``.
split_multi_geoms : bool, optional (default: True)
Should multi-polygons or multi-linestrings generated by clipping
a geometry into discontinuous pieces be separated? Defaults to yes
(``True``).
min_partial_perc : float, optional (default: 0.0)
The minimum percentage of a :class:`shapely.geometry.Polygon` 's
area or :class:`shapely.geometry.LineString` 's length that must
be retained within a tile's bounds to be included in the output.
Defaults to ``0.0``, meaning that the contained portion of a
clipped geometry will be included, no matter how small.
dest_fname_base : str, optional (default: 'geoms')
The base filename to use when creating outputs. The lower left
corner coordinates of the tile's bounding box will be appended
when saving.
obj_id_col : str, optional (default: None)
If ``split_multi_geoms=True``, the name of a column that specifies
a unique identifier for each geometry (e.g. the ``"BuildingId"``
column in many SpaceNet datasets.) See
:func:`solaris.utils.geo.split_multi_geometries` for more.
output_ext : str, optional, (default: geojson)
Extension of output files, can be 'geojson' or 'json'.
Generate `src` vector data tiles bounded by `tile_bounds`.
Arguments
---------
src : `str` or :class:`geopandas.GeoDataFrame`
The source vector data to tile. Must either be a path to a GeoJSON
or a :class:`geopandas.GeoDataFrame`.
tile_bounds : list
A :class:`list` made up of ``[left, top, right, bottom] `` sublists
(this can be extracted from
:class:`solaris.tile.raster_tile.RasterTiler` after tiling imagery)
tile_bounds_crs : int, optional
The EPSG code for the CRS that the tile bounds are in. If not
provided, it's assumed that the CRS is the same as in `src`. This
argument must be provided if the bound coordinates and `src` are
not in the same CRS, otherwise tiling will not occur correctly.
geom_type : str, optional (default: "Polygon")
The type of geometries contained within `src`. Defaults to
``"Polygon"``, can also be ``"LineString"``.
split_multi_geoms : bool, optional (default: True)
Should multi-polygons or multi-linestrings generated by clipping
a geometry into discontinuous pieces be separated? Defaults to yes
(``True``).
min_partial_perc : float, optional (default: 0.0)
The minimum percentage of a :class:`shapely.geometry.Polygon` 's
area or :class:`shapely.geometry.LineString` 's length that must
be retained within a tile's bounds to be included in the output.
Defaults to ``0.0``, meaning that the contained portion of a
clipped geometry will be included, no matter how small.
obj_id_col : str, optional (default: None)
If ``split_multi_geoms=True``, the name of a column that specifies
a unique identifier for each geometry (e.g. the ``"BuildingId"``
column in many SpaceNet datasets.) See
:func:`solaris.utils.geo.split_multi_geometries` for more.
Yields
------
tile_gdf : :class:`geopandas.GeoDataFrame`
A tile geodataframe.
tb : list
A list with ``[left, top, right, bottom] `` coordinates for the
boundaries contained by `tile_gdf`.
retains the paths of the last call to .tile() check if the tile bounds and vector are in the same crs used to transform tb for clip_gdf() if geom_type == "LineString": TODO must implement different case for lines and for spatialIndex (Assume RTree is already performed) assume linestrings remove null cut_gdf = cut_gdf[cut_gdf.geom_type != "GeometryCollection"] TODO: IMPLEMENT TRUNCATION MEASUREMENT FOR LINESTRINGS | 7,035 | en | 0.583641 |
import os
import argparse
import gym
from gym import envs
import numpy as np
from skimage import transform
from stable_baselines.common.atari_wrappers import WarpFrame
from stable_baselines.common.vec_env import VecVideoRecorder, VecFrameStack, VecNormalize
from .utils import ALGOS, create_test_env, get_saved_hyperparams, get_latest_run_id, find_saved_model
#-----------------------------------------
import toy_simulator
#import dVRL_simulator
from skimage import transform
from gym.spaces import Box
import cv2
#-----------------------------------------
if __name__ == '__main__':
    # Record a video of a trained RL agent: load a saved model + env stats,
    # roll the policy out for n timesteps, and capture frames to disk.
    parser = argparse.ArgumentParser()
    parser.add_argument('--env', help='environment ID', type=str, default='CartPole-v1')
    parser.add_argument('-f', '--folder', help='Log folder', type=str, default='trained_agents')
    parser.add_argument('-o', '--output-folder', help='Output folder', type=str, default='logs/videos/')
    parser.add_argument('--algo', help='RL Algorithm', default='ppo2',
                        type=str, required=False, choices=list(ALGOS.keys()))
    parser.add_argument('-n', '--n-timesteps', help='number of timesteps', default=1000,
                        type=int)
    parser.add_argument('--n-envs', help='number of environments', default=1,
                        type=int)
    parser.add_argument('--deterministic', action='store_true', default=False,
                        help='Use deterministic actions')
    parser.add_argument('--seed', help='Random generator seed', type=int, default=0)
    parser.add_argument('--no-render', action='store_true', default=False,
                        help='Do not render the environment (useful for tests)')
    parser.add_argument('--exp-id', help='Experiment ID (default: -1, no exp folder, 0: latest)', default=-1,
                        type=int)
    args = parser.parse_args()
    env_id = args.env
    algo = args.algo
    folder = args.folder
    video_folder = args.output_folder
    seed = args.seed
    deterministic = args.deterministic
    video_length = args.n_timesteps
    n_envs = args.n_envs
    # exp-id 0 means "most recent run"; resolve it to a concrete run id.
    if args.exp_id == 0:
        args.exp_id = get_latest_run_id(os.path.join(folder, algo), env_id)
        print('Loading latest experiment, id={}'.format(args.exp_id))
    # Sanity checks
    if args.exp_id > 0:
        log_path = os.path.join(folder, algo, '{}_{}'.format(env_id, args.exp_id))
    else:
        log_path = os.path.join(folder, algo)
    model_path = find_saved_model(algo, log_path, env_id)
    stats_path = os.path.join(log_path, env_id)
    hyperparams, stats_path = get_saved_hyperparams(stats_path)
    is_atari = 'NoFrameskip' in env_id
    # Rebuild the evaluation env with the same wrappers/normalization stats
    # the agent was trained with.
    env = create_test_env(env_id, n_envs=n_envs, is_atari=is_atari,
                          stats_path=stats_path, seed=seed, log_dir=None,
                          should_render=not args.no_render, hyperparams=hyperparams)
    #env = RGBobs(env)
    model = ALGOS[algo].load(model_path)
    obs = env.reset()
    #obs = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY) #ADDED 2
    #obs = cv2.resize(obs, (84,84), interpolation=cv2.INTER_AREA) #ADDED 2
    #obs_dummy = env.reset() #ADDED 1
    #obs = transform.resize(obs_dummy, (84,84)) #ADDED 1
    #env.observation_space = Box(low=0, high=255, shape=obs.shape, dtype=np.uint8) #ADDED 1
    #obs = obs[:,:, None]*255 #ADDED 1
    # Note: apparently it renders by default
    env = VecVideoRecorder(env, video_folder,
                           record_video_trigger=lambda x: x == 0, video_length=video_length,
                           name_prefix="{}-{}".format(algo, env_id))
    # NOTE(review): this reset's observation is discarded — the first
    # model.predict below uses `obs` from the pre-recorder reset. Confirm
    # this stale first frame is intended.
    env.reset()
    for _ in range(video_length + 1):
        # action = [env.action_space.sample()]
        action, _ = model.predict(obs, deterministic=deterministic)
        # Clip continuous actions to the env's legal bounds before stepping.
        if isinstance(env.action_space, gym.spaces.Box):
            action = np.clip(action, env.action_space.low, env.action_space.high)
        obs, _, _, _ = env.step(action)
    # Workaround for https://github.com/openai/gym/issues/893
    if n_envs == 1 and 'Bullet' not in env_id and not is_atari:
        env = env.venv
        # DummyVecEnv
        while isinstance(env, VecNormalize) or isinstance(env, VecFrameStack):
            env = env.venv
        env.envs[0].env.close()
    else:
        # SubprocVecEnv
        env.close()
| utils/record_video.py | 4,306 | -----------------------------------------import dVRL_simulator----------------------------------------- Sanity checksenv = RGBobs(env)obs = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY) ADDED 2obs = cv2.resize(obs, (84,84), interpolation=cv2.INTER_AREA) ADDED 2obs_dummy = env.reset() ADDED 1obs = transform.resize(obs_dummy, (84,84)) ADDED 1env.observation_space = Box(low=0, high=255, shape=obs.shape, dtype=np.uint8) ADDED 1obs = obs[:,:, None]*255 ADDED 1 Note: apparently it renders by default action = [env.action_space.sample()] Workaround for https://github.com/openai/gym/issues/893 DummyVecEnv SubprocVecEnv | 609 | en | 0.48827 |
from __future__ import absolute_import
from __future__ import unicode_literals
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
from builtins import next
from builtins import chr
from builtins import str
from builtins import range
from builtins import object
import copy
import os
import gevent
from pprint import pformat
import six
from vnc_api import vnc_api
from .exceptions import NoIdError, VncError
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from cfgm_common import jsonutils as json
from . import utils
import datetime
from operator import itemgetter
from collections import OrderedDict
from cfgm_common.datastore.drivers.cassandra_thrift import CassandraDriverThrift
from cfgm_common.datastore.drivers.cassandra_cql import CassandraDriverCQL
from cfgm_common.datastore import api as datastore_api
JSON_NONE = json.dumps(None)
class VncCassandraClient(object):
@staticmethod
def _is_metadata(column_name):
return column_name[:5] == 'META:'
@staticmethod
def _is_parent(column_name):
return column_name[:7] == 'parent:'
@staticmethod
def _is_prop(column_name):
return column_name[:5] == 'prop:'
@staticmethod
def _is_prop_list(column_name):
return column_name[:6] == 'propl:'
@staticmethod
def _is_prop_map(column_name):
return column_name[:6] == 'propm:'
@staticmethod
def _is_ref(column_name):
return column_name[:4] == 'ref:'
@staticmethod
def _is_backref(column_name):
return column_name[:8] == 'backref:'
@staticmethod
def _is_children(column_name):
return column_name[:9] == 'children:'
def add(self, cf_name, key, value):
try:
self._cassandra_driver.insert(key, value, cf_name=cf_name)
return True
except Exception as e:
self._logger("VNCCassandra, unable to add {}={}, error: {}".format(
key, value, e), level=SandeshLevel.SYS_WARN)
return False
def delete(self, cf_name, key, columns=None):
try:
self._cassandra_driver.remove(
key, columns, cf_name=cf_name)
return True
except Exception as e:
self._logger("VNCCassandra, unable to del {}={}, error: {}".format(
key, columns, e), level=SandeshLevel.SYS_WARN)
return False
def _get_resource_class(self, obj_type):
if hasattr(self, '_db_client_mgr'):
return self._db_client_mgr.get_resource_class(obj_type)
cls_name = '%s' % (utils.CamelCase(obj_type))
return getattr(vnc_api, cls_name)
# end _get_resource_class
@classmethod
def get_db_info(cls):
db_info = [(datastore_api.UUID_KEYSPACE_NAME, [datastore_api.OBJ_UUID_CF_NAME,
datastore_api.OBJ_FQ_NAME_CF_NAME,
datastore_api.OBJ_SHARED_CF_NAME])]
return db_info
# end get_db_info
    def __init__(self, server_list, cassandra_driver, **options):
        """Select and start the Cassandra driver, set up the object cache,
        and wrap the public accessors with connection-failure handling.

        server_list: Cassandra endpoints handed to the driver.
        cassandra_driver: driver selector, 'cql' or 'thrift'.
        options: forwarded verbatim to the driver class.
        """
        if cassandra_driver == 'cql':
            driverClass = CassandraDriverCQL
        elif cassandra_driver == 'thrift':
            driverClass = CassandraDriverThrift
            # TODO(sahid): To satisfy test-framework which has its
            # specific py3 support for thrift we can have the above
            # condition, when that will be fixed we could uncomment
            # the code.
            #if six.PY3:
            #    raise VncError(
            #        "selected driver `{}` not supported for Python 3.".format(
            #            cassandra_driver))
        else:
            raise VncError(
                "datastore driver not selected, see `cassandra_driver`.")
        self._cassandra_driver = driverClass(server_list, **options)
        self._logger = self._cassandra_driver.options.logger
        self._logger('VNCCassandra started with driver {}'.format(driverClass),
                     level=SandeshLevel.SYS_INFO)
        # uuid -> fq_name translation cache.
        self._cache_uuid_to_fq_name = {}
        self._obj_cache_mgr = ObjectCacheManager(
            self._cassandra_driver.options.logger,
            self,
            max_entries=self._cassandra_driver.options.obj_cache_entries,
            obj_cache_exclude_types=self._cassandra_driver.options.obj_cache_exclude_types,
            debug_obj_cache_types=self._cassandra_driver.options.debug_obj_cache_types,
        )
        self._obj_cache_exclude_types = self._cassandra_driver.options.obj_cache_exclude_types or []
        # these functions make calls to pycassa xget() and get_range()
        # generator functions which can't be wrapped around handle_exceptions()
        # at the time of cassandra init, hence need to wrap these functions that
        # uses it to catch cassandra connection failures.
        self.object_update = self._cassandra_driver._handle_exceptions(
            self.object_update)
        self.object_list = self._cassandra_driver._handle_exceptions(
            self.object_list)
        self.object_read = self._cassandra_driver._handle_exceptions(
            self.object_read)
        self.object_raw_read = self._cassandra_driver._handle_exceptions(
            self.object_raw_read)
        self.object_delete = self._cassandra_driver._handle_exceptions(
            self.object_delete)
        self.prop_collection_read = self._cassandra_driver._handle_exceptions(
            self.prop_collection_read)
        self.uuid_to_fq_name = self._cassandra_driver._handle_exceptions(
            self.uuid_to_fq_name)
        self.uuid_to_obj_type = self._cassandra_driver._handle_exceptions(
            self.uuid_to_obj_type)
        self.fq_name_to_uuid = self._cassandra_driver._handle_exceptions(
            self.fq_name_to_uuid)
        self.get_shared = self._cassandra_driver._handle_exceptions(
            self.get_shared)
        self.walk = self._cassandra_driver._handle_exceptions(self.walk)
        if self._cassandra_driver.options.walk:
            self.walk()
    # end __init__
def _create_prop(self, bch, obj_uuid, prop_name, prop_val):
self._cassandra_driver.insert(
obj_uuid,
{'prop:%s' % (prop_name): json.dumps(prop_val)},
batch=bch)
# end _create_prop
def _update_prop(self, bch, obj_uuid, prop_name, new_props):
if new_props[prop_name] is None:
self._cassandra_driver.remove(obj_uuid,
columns=['prop:' + prop_name],
batch=bch)
else:
self._cassandra_driver.insert(
obj_uuid,
{'prop:' + prop_name: json.dumps(new_props[prop_name])},
batch=bch)
# prop has been accounted for, remove so only new ones remain
del new_props[prop_name]
# end _update_prop
def _add_to_prop_list(self, bch, obj_uuid, prop_name,
prop_elem_value, prop_elem_position):
self._cassandra_driver.insert(obj_uuid,
{'propl:%s:%s' % (prop_name, prop_elem_position):
json.dumps(prop_elem_value)},
batch=bch)
# end _add_to_prop_list
def _delete_from_prop_list(self, bch, obj_uuid, prop_name,
prop_elem_position):
self._cassandra_driver.remove(
obj_uuid,
columns=['propl:%s:%s' % (prop_name, prop_elem_position)],
batch=bch)
# end _delete_from_prop_list
def _set_in_prop_map(self, bch, obj_uuid, prop_name,
prop_elem_value, prop_elem_position):
self._cassandra_driver.insert(obj_uuid,
{'propm:%s:%s' % (prop_name, prop_elem_position):
json.dumps(prop_elem_value)},
batch=bch)
# end _set_in_prop_map
def _delete_from_prop_map(self, bch, obj_uuid, prop_name,
prop_elem_position):
self._cassandra_driver.remove(
obj_uuid,
columns=['propm:%s:%s' % (prop_name, prop_elem_position)],
batch=bch)
# end _delete_from_prop_map
def _create_child(self, bch, parent_type, parent_uuid,
child_type, child_uuid):
child_col = {'children:%s:%s' %
(child_type, child_uuid): JSON_NONE}
self._cassandra_driver.insert(parent_uuid, child_col, batch=bch)
parent_col = {'parent:%s:%s' %
(parent_type, parent_uuid): JSON_NONE}
self._cassandra_driver.insert(child_uuid, parent_col, batch=bch)
# update latest_col_ts on parent object
if parent_type not in self._obj_cache_exclude_types:
self.update_latest_col_ts(bch, parent_uuid)
# end _create_child
def _delete_child(self, bch, parent_type, parent_uuid,
child_type, child_uuid):
self._cassandra_driver.remove(
parent_uuid,
columns=['children:%s:%s' % (child_type, child_uuid)],
batch=bch)
# update latest_col_ts on parent object
if parent_type not in self._obj_cache_exclude_types:
self.update_latest_col_ts(bch, parent_uuid)
# end _delete_child
    def _create_ref(self, bch, obj_type, obj_uuid, ref_obj_type, ref_uuid,
                    ref_data):
        """Create a reference column from obj -> ref plus its reverse link.

        Same-type refs are symmetric: the reverse link is a full 'ref:'
        column and the referred object's last-modified timestamp is bumped;
        otherwise a 'backref:' column is written. Returns the list of uuids
        that received a symmetric update (empty or [ref_uuid]).
        """
        j_ref_data = json.dumps(ref_data)
        symmetric_ref_updates = []
        self._cassandra_driver.insert(
            obj_uuid, {'ref:%s:%s' %
                       (ref_obj_type, ref_uuid): j_ref_data},
            batch=bch)
        if obj_type == ref_obj_type:
            # Symmetric (same-type) ref: mirror a full 'ref:' column on the
            # other row and record it as modified.
            self._cassandra_driver.insert(
                ref_uuid, {'ref:%s:%s' %
                           (obj_type, obj_uuid): j_ref_data},
                batch=bch)
            self.update_last_modified(bch, obj_type, ref_uuid)
            symmetric_ref_updates = [ref_uuid]
        else:
            self._cassandra_driver.insert(
                ref_uuid, {'backref:%s:%s' %
                           (obj_type, obj_uuid): j_ref_data},
                batch=bch)
        # update latest_col_ts on referred object
        if ref_obj_type not in self._obj_cache_exclude_types:
            if ref_obj_type == obj_type:
                # evict other side of ref since it is stale from
                # GET /<old-ref-uuid> pov.
                self._obj_cache_mgr.evict(obj_type, [ref_uuid])
            else:
                self.update_latest_col_ts(bch, ref_uuid)
        return symmetric_ref_updates
    # end _create_ref
def _update_ref(self, bch, obj_type, obj_uuid, ref_obj_type, old_ref_uuid,
                new_ref_infos):
    """Reconcile one stored ref column against the desired ref set.

    If old_ref_uuid is not in new_ref_infos[ref_obj_type], the ref and
    its mirror (symmetric 'ref:' or 'backref:') column are removed.
    Otherwise the ref is retained with its new attr value and the entry
    is consumed from new_ref_infos, so the caller is left with only the
    genuinely new refs to create.  Returns uuids whose last_modified
    was touched symmetrically ([] or [old_ref_uuid]).
    """
    if ref_obj_type not in new_ref_infos:
        # update body didn't touch this type, nop
        return []
    symmetric_ref_updates = []
    if old_ref_uuid not in new_ref_infos[ref_obj_type]:
        # remove old ref
        self._cassandra_driver.remove(
            obj_uuid,
            columns=['ref:%s:%s' % (ref_obj_type, old_ref_uuid)],
            batch=bch)
        if obj_type == ref_obj_type:
            self._cassandra_driver.remove(
                old_ref_uuid,
                columns=['ref:%s:%s' % (obj_type, obj_uuid)],
                batch=bch)
            try:
                self.update_last_modified(bch, obj_type, old_ref_uuid)
                symmetric_ref_updates = [old_ref_uuid]
            except NoIdError as e:
                # old_ref_uuid might have been deleted
                # if cache has the link, it will be evicted
                # if cache doesn't have, keyerror is caught and continued
                pass
        else:
            self._cassandra_driver.remove(
                old_ref_uuid,
                columns=['backref:%s:%s' % (obj_type, obj_uuid)],
                batch=bch)
    else:
        # retain old ref with new ref attr
        new_ref_data = new_ref_infos[ref_obj_type][old_ref_uuid]
        j_new_ref_data = json.dumps(new_ref_data)
        self._cassandra_driver.insert(
            obj_uuid,
            {'ref:%s:%s' % (ref_obj_type, old_ref_uuid):
             j_new_ref_data},
            batch=bch)
        if obj_type == ref_obj_type:
            # symmetric ref: refresh the mirrored forward ref too
            self._cassandra_driver.insert(
                old_ref_uuid,
                {'ref:%s:%s' % (obj_type, obj_uuid):
                 j_new_ref_data},
                batch=bch)
            self.update_last_modified(bch, obj_type, old_ref_uuid)
            symmetric_ref_updates = [old_ref_uuid]
        else:
            self._cassandra_driver.insert(
                old_ref_uuid,
                {'backref:%s:%s' % (obj_type, obj_uuid):
                 j_new_ref_data},
                batch=bch)
        # uuid has been accounted for, remove so only new ones remain
        del new_ref_infos[ref_obj_type][old_ref_uuid]
    # update latest_col_ts on referred object
    if ref_obj_type not in self._obj_cache_exclude_types:
        if ref_obj_type == obj_type:
            # evict other side of ref since it is stale from
            # GET /<old-ref-uuid> pov.
            self._obj_cache_mgr.evict(obj_type, [old_ref_uuid])
        else:
            self.update_latest_col_ts(bch, old_ref_uuid)
    return symmetric_ref_updates
# end _update_ref
def _delete_ref(self, bch, obj_type, obj_uuid, ref_obj_type, ref_uuid):
    """Remove the ref column obj_uuid -> ref_uuid and its mirror column.

    When bch is None a private batch is created and sent before
    returning; otherwise the mutations ride on the caller's batch.
    Returns uuids whose last_modified was touched symmetrically
    ([] or [ref_uuid]).
    """
    send = False
    symmetric_ref_updates = []
    if bch is None:
        send = True
        bch = self._cassandra_driver.get_cf_batch(datastore_api.OBJ_UUID_CF_NAME)
    self._cassandra_driver.remove(
        obj_uuid,
        columns=['ref:%s:%s' % (ref_obj_type, ref_uuid)],
        batch=bch)
    if obj_type == ref_obj_type:
        # symmetric (same-type) ref: drop the mirrored forward ref too
        self._cassandra_driver.remove(ref_uuid, columns=[
            'ref:%s:%s' % (obj_type, obj_uuid)],
            batch=bch)
        try:
            self.update_last_modified(bch, obj_type, ref_uuid)
            symmetric_ref_updates = [ref_uuid]
        except NoIdError as e:
            # ref_uuid might have been deleted
            # if cache has the link, it will be evicted
            # if cache doesn't have, keyerror is caught and continued
            pass
    else:
        self._cassandra_driver.remove(
            ref_uuid,
            columns=['backref:%s:%s' % (obj_type, obj_uuid)],
            batch=bch)
    # update latest_col_ts on referred object
    if ref_obj_type not in self._obj_cache_exclude_types:
        if ref_obj_type == obj_type:
            # evict other side of ref since it is stale from
            # GET /<old-ref-uuid> pov.
            self._obj_cache_mgr.evict(obj_type, [ref_uuid])
        else:
            self.update_latest_col_ts(bch, ref_uuid)
    if send:
        bch.send()
    return symmetric_ref_updates
# end _delete_ref
def _get_xsd_class(self, xsd_type):
    """Resolve the generated XSD wrapper class named *xsd_type* from vnc_api."""
    xsd_cls = getattr(vnc_api, xsd_type)
    return xsd_cls
# end _get_xsd_class
def object_create(self, obj_type, obj_id, obj_dict,
                  uuid_batch=None, fqname_batch=None):
    """Persist a new object: uuid-row columns, parent link, props, refs,
    and the fqname-table entry.

    When uuid_batch / fqname_batch are supplied the mutations join those
    batches instead of being sent here.  Returns (True,
    symmetric_ref_updates) on success, or (False, (400, msg)) when
    obj_dict names an invalid parent type.
    """
    obj_class = self._get_resource_class(obj_type)
    if uuid_batch:
        bch = uuid_batch
    else:
        # Gather column values for obj and updates to backrefs
        # in a batch and write it at the end
        bch = self._cassandra_driver.get_cf_batch(datastore_api.OBJ_UUID_CF_NAME)
    obj_cols = {}
    obj_cols['fq_name'] = json.dumps(obj_dict['fq_name'])
    obj_cols['type'] = json.dumps(obj_type)
    if obj_type not in self._obj_cache_exclude_types:
        obj_cols['META:latest_col_ts'] = JSON_NONE
    if 'parent_type' in obj_dict:
        # non config-root child
        parent_type = obj_dict['parent_type']
        if parent_type not in obj_class.parent_types:
            msg = ("Invalid parent type: %s not in %s" %
                   (parent_type, obj_class.parent_types))
            return False, (400, msg)
        parent_object_type = self._get_resource_class(
            parent_type).object_type
        # parent fq_name is our fq_name minus the last (own-name) element
        parent_fq_name = obj_dict['fq_name'][:-1]
        obj_cols['parent_type'] = json.dumps(parent_type)
        parent_uuid = self.fq_name_to_uuid(parent_object_type,
                                           parent_fq_name)
        self._create_child(bch, parent_object_type, parent_uuid, obj_type,
                           obj_id)
    # Properties
    for prop_field in obj_class.prop_fields:
        field = obj_dict.get(prop_field)
        # Specifically checking for None
        if field is None:
            continue
        if prop_field == 'id_perms':
            # stamp creation time; last_modified starts equal to created
            field['created'] = datetime.datetime.utcnow().isoformat()
            field['last_modified'] = field['created']
        if prop_field in obj_class.prop_list_fields:
            # store list elements in list order
            # iterate on wrapped element or directly or prop field
            if obj_class.prop_list_field_has_wrappers[prop_field]:
                wrapper_field_keys = list(field.keys())
                if wrapper_field_keys:
                    wrapper_field = wrapper_field_keys[0]
                    list_coll = field[wrapper_field]
                else:
                    list_coll = []
            else:
                list_coll = field
            for i in range(len(list_coll)):
                self._add_to_prop_list(
                    bch, obj_id, prop_field, list_coll[i], str(i))
        elif prop_field in obj_class.prop_map_fields:
            # iterate on wrapped element or directly or prop field
            if obj_class.prop_map_field_has_wrappers[prop_field]:
                wrapper_field_keys = list(field.keys())
                if wrapper_field_keys:
                    wrapper_field = wrapper_field_keys[0]
                    map_coll = field[wrapper_field]
                else:
                    map_coll = []
            else:
                map_coll = field
            map_key_name = obj_class.prop_map_field_key_names[prop_field]
            for map_elem in map_coll:
                map_key = map_elem[map_key_name]
                self._set_in_prop_map(
                    bch, obj_id, prop_field, map_elem, map_key)
        else:
            self._create_prop(bch, obj_id, prop_field, field)
    # References
    # e.g. ref_field = 'network_ipam_refs'
    #      ref_res_type = 'network-ipam'
    #      ref_link_type = 'VnSubnetsType'
    #      is_weakref = False
    symmetric_ref_updates = []
    for ref_field in obj_class.ref_fields:
        ref_fld_types_list = list(obj_class.ref_field_types[ref_field])
        ref_res_type = ref_fld_types_list[0]
        # ref_link_type is unused here; kept from the schema tuple unpack
        ref_link_type = ref_fld_types_list[1]
        ref_obj_type = self._get_resource_class(ref_res_type).object_type
        refs = obj_dict.get(ref_field, [])
        for ref in refs:
            ref_uuid = self.fq_name_to_uuid(ref_obj_type, ref['to'])
            ref_attr = ref.get('attr')
            ref_data = {'attr': ref_attr, 'is_weakref': False}
            ret = self._create_ref(bch, obj_type, obj_id, ref_obj_type, ref_uuid,
                                   ref_data)
            symmetric_ref_updates.extend(ret)
    self._cassandra_driver.insert(obj_id, obj_cols, batch=bch)
    if not uuid_batch:
        bch.send()
    # Update fqname table
    fq_name_str = ':'.join(obj_dict['fq_name'])
    fq_name_cols = {utils.encode_string(fq_name_str) + ':' + obj_id:
                    JSON_NONE}
    if fqname_batch:
        fqname_batch.insert(obj_type, fq_name_cols)
    else:
        self._cassandra_driver.insert(
            cf_name=datastore_api.OBJ_FQ_NAME_CF_NAME,
            key=obj_type,
            columns=fq_name_cols)
    return (True, symmetric_ref_updates)
# end object_create
def object_raw_read(self, obj_type, obj_uuids, prop_names):
    """Read selected raw property values, serving from the object cache
    when possible and falling back to the DB for misses."""
    obj_class = self._get_resource_class(obj_type)
    hit_obj_dicts, miss_uuids = self._obj_cache_mgr.read(
        obj_class, obj_uuids, prop_names, False)
    wanted_cols = ['prop:' + name for name in prop_names]
    miss_obj_rows = self._cassandra_driver.multiget(
        datastore_api.OBJ_UUID_CF_NAME, miss_uuids, wanted_cols)
    miss_obj_dicts = []
    for obj_uuid, columns in list(miss_obj_rows.items()):
        obj_dict = {'uuid': obj_uuid}
        for col_name, col_value in list(columns.items()):
            # strip 'prop:' before sending result back
            obj_dict[col_name[5:]] = col_value
        miss_obj_dicts.append(obj_dict)
    return hit_obj_dicts + miss_obj_dicts
def object_read(self, obj_type, obj_uuids, field_names=None,
                ret_readonly=False):
    """Read objects of obj_type, optionally restricted to field_names.

    ret_readonly=True is the caller's promise not to mutate the
    returned dicts, which permits serving from / populating the object
    cache.  Returns (True, obj_dicts); raises NoIdError when a single
    requested uuid yields no object.
    """
    if not obj_uuids:
        return (True, [])
    # if field_names=None, all fields will be read/returned
    req_fields = field_names
    obj_class = self._get_resource_class(obj_type)
    ref_fields = obj_class.ref_fields
    backref_fields = obj_class.backref_fields
    children_fields = obj_class.children_fields
    list_fields = obj_class.prop_list_fields
    map_fields = obj_class.prop_map_fields
    prop_fields = obj_class.prop_fields - (list_fields | map_fields)
    # cache only serves read-only results for cacheable types
    if ((ret_readonly is False) or
            (obj_type in self._obj_cache_exclude_types)):
        ignore_cache = True
    else:
        ignore_cache = False
    # optimize for common case of reading non-backref, non-children fields
    # ignoring columns starting from 'b' and 'c' - significant performance
    # impact in scaled setting. e.g. read of project
    # For caching (when ret values will be used for readonly
    # e.g. object read/list context):
    # 1. pick the hits, and for the misses..
    # 2. read from db, cache, filter with fields
    # else read from db with specified field filters
    if (field_names is None or
            set(field_names) & (backref_fields | children_fields)):
        # atleast one backref/children field is needed
        include_backrefs_children = True
        if ignore_cache:
            hit_obj_dicts = []
            miss_uuids = obj_uuids
        else:
            hit_obj_dicts, miss_uuids = self._obj_cache_mgr.read(
                obj_class,
                obj_uuids,
                field_names,
                include_backrefs_children,
            )
        miss_obj_rows = self._cassandra_driver.multiget(
            datastore_api.OBJ_UUID_CF_NAME, miss_uuids,
            timestamp=True)
    else:
        # ignore reading backref + children columns
        include_backrefs_children = False
        if ignore_cache:
            hit_obj_dicts = []
            miss_uuids = obj_uuids
        else:
            hit_obj_dicts, miss_uuids = self._obj_cache_mgr.read(
                obj_class,
                obj_uuids,
                field_names,
                include_backrefs_children,
            )
        # start='d' skips 'backref:'/'children:' columns server-side
        miss_obj_rows = self._cassandra_driver.multiget(
            datastore_api.OBJ_UUID_CF_NAME,
            miss_uuids,
            start='d',
            timestamp=True)
    if (ignore_cache or
            self._obj_cache_mgr.max_entries < len(miss_uuids)):
        # caller may modify returned value, or
        # cannot fit in cache,
        # just render with filter and don't cache
        rendered_objs = self._render_obj_from_db(
            obj_class, miss_obj_rows, req_fields,
            include_backrefs_children)
        obj_dicts = hit_obj_dicts + \
            [v['obj_dict'] for k, v in list(rendered_objs.items())]
    else:
        # can fit and caller won't modify returned value,
        # so render without filter, cache and return
        # cached value
        rendered_objs_to_cache = self._render_obj_from_db(
            obj_class, miss_obj_rows, None,
            include_backrefs_children)
        field_filtered_objs = self._obj_cache_mgr.set(
            obj_type,
            rendered_objs_to_cache,
            req_fields,
            include_backrefs_children,
        )
        obj_dicts = hit_obj_dicts + field_filtered_objs
    if not obj_dicts:
        if len(obj_uuids) == 1:
            raise NoIdError(obj_uuids[0])
        else:
            return (True, [])
    return (True, obj_dicts)
# end object_read
def object_count_children(self, obj_type, obj_uuid, child_type):
    """Count children of one type under obj_uuid via a column-range count."""
    if child_type is None:
        return (False, '')
    obj_class = self._get_resource_class(obj_type)
    if child_type not in obj_class.children_fields:
        return (False,
                '%s is not a child type of %s' % (child_type, obj_type))
    # child_type is plural; columns use the singular name.  ';' is the
    # character right after ':' so the pair bounds the column range.
    prefix = 'children:' + child_type[:-1]
    num_children = self._cassandra_driver.get_count(
        datastore_api.OBJ_UUID_CF_NAME,
        obj_uuid,
        start=prefix + ':',
        finish=prefix + ';')
    return (True, num_children)
# end object_count_children
def update_last_modified(self, bch, obj_type, obj_uuid, id_perms=None):
    """Refresh id_perms.last_modified (UTC ISO-8601) on obj_uuid.

    id_perms may be passed in to avoid a read; otherwise the stored
    value is fetched first (callers rely on the read raising NoIdError
    when the row is gone).  Also touches latest_col_ts for cacheable
    types.
    """
    if id_perms is None:
        id_perms = self._cassandra_driver.get_one_col(
            datastore_api.OBJ_UUID_CF_NAME,
            obj_uuid,
            'prop:id_perms')
    id_perms['last_modified'] = datetime.datetime.utcnow().isoformat()
    self._update_prop(bch, obj_uuid, 'id_perms', {'id_perms': id_perms})
    if obj_type not in self._obj_cache_exclude_types:
        self.update_latest_col_ts(bch, obj_uuid)
# end update_last_modified
def update_latest_col_ts(self, bch, obj_uuid):
    """Touch META:latest_col_ts on obj_uuid if the row still exists."""
    try:
        # existence probe: a deleted row has no 'type' column
        self._cassandra_driver.get_one_col(datastore_api.OBJ_UUID_CF_NAME,
                                           obj_uuid,
                                           'type')
    except NoIdError:
        # object is gone; nothing to stamp
        return
    ts_col = {'META:latest_col_ts': JSON_NONE}
    self._cassandra_driver.insert(obj_uuid, ts_col, batch=bch)
# end update_latest_col_ts
def object_update(self, obj_type, obj_uuid, new_obj_dict, uuid_batch=None):
    """Apply new_obj_dict to an existing object.

    Walks the stored columns once: refreshes id_perms, updates changed
    simple props, clears old list/map property elements, reconciles
    existing refs against the new ref set, then writes genuinely new
    refs and new prop values.  Returns (True, symmetric_ref_updates).
    """
    obj_class = self._get_resource_class(obj_type)
    # Grab ref-uuids and properties in new version
    new_ref_infos = {}
    symmetric_ref_updates = []
    # Properties
    new_props = {}
    for prop_field in obj_class.prop_fields:
        if prop_field in new_obj_dict:
            new_props[prop_field] = new_obj_dict[prop_field]
    # References
    # e.g. ref_field = 'network_ipam_refs'
    #      ref_type = 'network-ipam'
    #      ref_link_type = 'VnSubnetsType'
    #      is_weakref = False
    for ref_field in obj_class.ref_fields:
        ref_fld_types_list = list(obj_class.ref_field_types[ref_field])
        ref_res_type = ref_fld_types_list[0]
        # ref_link_type is unused here; kept from the schema tuple unpack
        ref_link_type = ref_fld_types_list[1]
        is_weakref = ref_fld_types_list[2]
        ref_obj_type = self._get_resource_class(ref_res_type).object_type
        if ref_field in new_obj_dict:
            new_refs = new_obj_dict[ref_field]
            new_ref_infos[ref_obj_type] = {}
            for new_ref in new_refs or []:
                try:
                    new_ref_uuid = new_ref['uuid']
                except KeyError:
                    # ref given by fq_name only; resolve to uuid
                    new_ref_uuid = self.fq_name_to_uuid(ref_obj_type,
                                                        new_ref['to'])
                new_ref_attr = new_ref.get('attr')
                new_ref_data = {'attr': new_ref_attr,
                                'is_weakref': is_weakref}
                new_ref_infos[ref_obj_type][new_ref_uuid] = new_ref_data
    # Gather column values for obj and updates to backrefs
    # in a batch and write it at the end
    if uuid_batch:
        bch = uuid_batch
    else:
        bch = self._cassandra_driver.get_cf_batch(
            datastore_api.OBJ_UUID_CF_NAME)
    for col_name, col_value in self._cassandra_driver.xget(
            datastore_api.OBJ_UUID_CF_NAME, obj_uuid):
        if self._is_prop(col_name):
            (_, prop_name) = col_name.split(':')
            if prop_name == 'id_perms':
                # id-perms always has to be updated for last-mod timestamp
                # get it from request dict(or from db if not in request dict)
                new_id_perms = new_obj_dict.get(
                    prop_name, json.loads(col_value))
                self.update_last_modified(
                    bch, obj_type, obj_uuid, new_id_perms)
            elif prop_name in new_obj_dict:
                self._update_prop(
                    bch, obj_uuid, prop_name, new_props)
        if self._is_prop_list(col_name):
            (_, prop_name, prop_elem_position) = col_name.split(':', 2)
            if prop_name in new_props:
                # delete all old values of prop list
                self._delete_from_prop_list(
                    bch, obj_uuid, prop_name, prop_elem_position)
        if self._is_prop_map(col_name):
            (_, prop_name, prop_elem_position) = col_name.split(':', 2)
            if prop_name in new_props:
                # delete all old values of prop list
                self._delete_from_prop_map(
                    bch, obj_uuid, prop_name, prop_elem_position)
        if self._is_ref(col_name):
            (_, ref_type, ref_uuid) = col_name.split(':')
            # consumes matching entries from new_ref_infos
            ret = self._update_ref(bch, obj_type, obj_uuid, ref_type,
                                   ref_uuid, new_ref_infos)
            symmetric_ref_updates.extend(ret)
    # for all column names
    # create new refs
    for ref_type in list(new_ref_infos.keys()):
        for ref_uuid in list(new_ref_infos[ref_type].keys()):
            ref_data = new_ref_infos[ref_type][ref_uuid]
            ret = self._create_ref(bch, obj_type, obj_uuid, ref_type,
                                   ref_uuid, ref_data)
            symmetric_ref_updates.extend(ret)
    # create new props
    for prop_name in list(new_props.keys()):
        if prop_name in obj_class.prop_list_fields:
            # store list elements in list order
            # iterate on wrapped element or directly on prop field
            # for wrapped lists, store without the wrapper. regenerate
            # wrapper on read
            if (obj_class.prop_list_field_has_wrappers[prop_name] and
                    new_props[prop_name]):
                wrapper_field = list(new_props[prop_name].keys())[0]
                list_coll = new_props[prop_name][wrapper_field]
            else:
                list_coll = new_props[prop_name]
            for i in range(len(list_coll)):
                self._add_to_prop_list(bch, obj_uuid, prop_name,
                                       list_coll[i], str(i))
        elif prop_name in obj_class.prop_map_fields:
            # store map elements in key order
            # iterate on wrapped element or directly on prop field
            # for wrapped lists, store without the wrapper. regenerate
            # wrapper on read
            if (obj_class.prop_map_field_has_wrappers[prop_name] and
                    new_props[prop_name]):
                wrapper_field = list(new_props[prop_name].keys())[0]
                map_coll = new_props[prop_name][wrapper_field]
            else:
                map_coll = new_props[prop_name]
            map_key_name = obj_class.prop_map_field_key_names[prop_name]
            for map_elem in map_coll:
                map_key = map_elem[map_key_name]
                self._set_in_prop_map(bch, obj_uuid, prop_name,
                                      map_elem, map_key)
        else:
            self._create_prop(bch, obj_uuid, prop_name, new_props[prop_name])
    if not uuid_batch:
        try:
            bch.send()
        finally:
            # evict even on failure so a stale cache entry cannot survive
            self._obj_cache_mgr.evict(obj_type, [obj_uuid])
    return (True, symmetric_ref_updates)
# end object_update
def object_list(self, obj_type, parent_uuids=None, back_ref_uuids=None,
                obj_uuids=None, count=False, filters=None,
                paginate_start=None, paginate_count=None):
    """List (fq_name, uuid) pairs of obj_type with optional anchoring,
    property filtering and pagination.

    Anchors, in priority order: parent_uuids (children scan),
    back_ref_uuids (backref scan), obj_uuids (explicit set), otherwise
    the whole fqname table.  With count=True returns (True, n, None);
    otherwise (True, [(fq_name, uuid), ...], ret_marker), where
    ret_marker resumes pagination ('0'/None means start from the top).
    """
    obj_class = self._get_resource_class(obj_type)
    children_fq_names_uuids = []
    ret_marker = None
    anchored_op = True

    def filter_rows(coll_infos, filters=None):
        # drop entries whose prop values fail the requested filters;
        # filters on unknown prop fields are ignored
        if not coll_infos or not filters:
            return coll_infos
        filtered_infos = {}
        columns = ['prop:%s' % filter_key for filter_key in filters if
                   filter_key in obj_class.prop_fields]
        if not columns:
            return coll_infos
        rows = self._cassandra_driver.multiget(datastore_api.OBJ_UUID_CF_NAME,
                                               list(coll_infos.keys()),
                                               columns=columns)
        for obj_uuid, properties in list(rows.items()):
            # give chance for zk heartbeat/ping
            gevent.sleep(0)
            full_match = True
            for filter_key, filter_values in list(filters.items()):
                property = 'prop:%s' % filter_key
                if property not in properties:
                    full_match = False
                    break
                prop_value = properties[property]
                if isinstance(prop_value, dict):
                    # dict prop matches if any filter value is a JSON
                    # sub-dict of the stored value (for-else: no match)
                    for filter_value in filter_values:
                        try:
                            filter_dict = json.loads(filter_value)
                        except ValueError:
                            continue
                        if (six.viewitems(filter_dict) <=
                                six.viewitems(prop_value)):
                            break
                    else:
                        full_match = False
                        break
                elif prop_value not in filter_values:
                    full_match = False
                    break
            if full_match:
                filtered_infos[obj_uuid] = coll_infos[obj_uuid]
        return filtered_infos
    # end filter_rows

    def get_fq_name_uuid_list(obj_uuids):
        # resolve uuids to (fq_name, uuid); silently skip wrong-type or
        # no-longer-existing uuids
        ret_list = []
        for obj_uuid in obj_uuids:
            try:
                if obj_type != self.uuid_to_obj_type(obj_uuid):
                    continue
                obj_fq_name = self.uuid_to_fq_name(obj_uuid)
                ret_list.append((obj_fq_name, obj_uuid))
            except NoIdError:
                pass
        return ret_list
    # end get_fq_name_uuid_list

    if parent_uuids:
        # go from parent to child
        ## tune start and count if paginated on same row
        #if paginate_start and (len(parent_uuids) == 1):
        if paginate_start and paginate_start != '0':
            # start from the next lexical value after the marker
            start = 'children:%s:%s' % (obj_type,
                paginate_start[:-1] + chr(ord(paginate_start[-1]) + 1))
            num_columns = paginate_count
        else:
            start = 'children:%s:' % (obj_type)
            num_columns = None
        obj_rows = self._cassandra_driver.multiget(
            datastore_api.OBJ_UUID_CF_NAME,
            parent_uuids,
            start=start,
            finish='children:%s;' % (obj_type),
            num_columns=num_columns,
            timestamp=True)

        def filter_rows_parent_anchor(sort=False):
            # flatten to [('children:<type>:<uuid>', (<val>,<ts>), *]
            all_cols = [cols for obj_key in list(obj_rows.keys())
                        for cols in list(obj_rows[obj_key].items())]
            all_child_infos = {}
            for col_name, col_val_ts in all_cols:
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                child_uuid = col_name.split(':')[2]
                if obj_uuids and child_uuid not in obj_uuids:
                    continue
                if back_ref_uuids:
                    # additionally require a ref to one of the anchors
                    child_cols = self._cassandra_driver.get(
                        datastore_api.OBJ_UUID_CF_NAME,
                        child_uuid,
                        start='ref:',
                        finish='ref;')
                    child_ref_ids = {col.split(':')[2]
                                     for col in child_cols or []}
                    if not set(back_ref_uuids) & child_ref_ids:
                        continue
                all_child_infos[child_uuid] = {'uuid': child_uuid,
                                               'tstamp': col_val_ts[1]}
            filt_child_infos = filter_rows(all_child_infos, filters)
            if not sort:
                ret_child_infos = list(filt_child_infos.values())
            else:
                ret_child_infos = sorted(list(filt_child_infos.values()),
                                         key=itemgetter('tstamp'))
            return get_fq_name_uuid_list(r['uuid'] for r in ret_child_infos)
        # end filter_rows_parent_anchor

        children_fq_names_uuids.extend(filter_rows_parent_anchor(sort=True))
    elif back_ref_uuids:
        # go from anchor to backrefs
        if paginate_start and paginate_start != '0':
            # get next lexical value of marker
            start = 'backref:%s:%s' % (obj_type,
                paginate_start[:-1] + chr(ord(paginate_start[-1]) + 1))
            num_columns = paginate_count
        else:
            start = 'backref:%s:' % (obj_type)
            num_columns = None
        obj_rows = self._cassandra_driver.multiget(
            datastore_api.OBJ_UUID_CF_NAME,
            back_ref_uuids,
            start=start,
            finish='backref:%s;' % (obj_type),
            num_columns=num_columns,
            timestamp=True)

        def filter_rows_backref_anchor():
            # flatten to [('backref:<obj-type>:<uuid>', (<val>,<ts>), *]
            all_cols = [cols for obj_key in list(obj_rows.keys())
                        for cols in list(obj_rows[obj_key].items())]
            all_backref_infos = {}
            for col_name, col_val_ts in all_cols:
                # give chance for zk heartbeat/ping
                gevent.sleep(0)
                backref_uuid = col_name.split(':')[2]
                if obj_uuids and backref_uuid not in obj_uuids:
                    continue
                all_backref_infos[backref_uuid] = \
                    {'uuid': backref_uuid, 'tstamp': col_val_ts[1]}
            filt_backref_infos = filter_rows(all_backref_infos, filters)
            return get_fq_name_uuid_list(r['uuid'] for r in
                                         list(filt_backref_infos.values()))
        # end filter_rows_backref_anchor

        children_fq_names_uuids.extend(filter_rows_backref_anchor())
    else:
        anchored_op = False
        if obj_uuids:
            # exact objects specified
            def filter_rows_object_list():
                all_obj_infos = {}
                marker = None
                read_in = 0
                start_idx = 0
                if paginate_start and paginate_start != '0':
                    # paginate through objects
                    # in list order of obj_uuids
                    try:
                        start_idx = obj_uuids.index(paginate_start) + 1
                    except ValueError:
                        # simulate end of pagination
                        start_idx = len(obj_uuids)
                for obj_uuid in obj_uuids[start_idx:]:
                    all_obj_infos[obj_uuid] = None
                    read_in += 1
                    if paginate_start and read_in >= paginate_count:
                        marker = obj_uuid
                        break
                filt_obj_infos = filter_rows(all_obj_infos, filters)
                return get_fq_name_uuid_list(list(filt_obj_infos.keys())), marker
            # end filter_rows_object_list

            filtered_rows, ret_marker = filter_rows_object_list()
            children_fq_names_uuids.extend(filtered_rows)
        else:  # grab all resources of this type
            if paginate_start and paginate_start != '0':
                # next lexical value after the fqname-column marker
                start = paginate_start[:-1] + \
                    chr(ord(paginate_start[-1]) + 1)
            else:
                start = ''
            cols = self._cassandra_driver.xget(
                datastore_api.OBJ_FQ_NAME_CF_NAME, '%s' % (obj_type),
                start=start)

            def filter_rows_no_anchor():
                marker = None
                all_obj_infos = {}
                read_in = 0
                for col_name, _ in cols:
                    # give chance for zk heartbeat/ping
                    gevent.sleep(0)
                    col_name_arr = utils.decode_string(col_name).split(':')
                    obj_uuid = col_name_arr[-1]
                    all_obj_infos[obj_uuid] = (col_name_arr[:-1], obj_uuid)
                    read_in += 1
                    if paginate_start and read_in >= paginate_count:
                        marker = col_name
                        break
                filt_obj_infos = filter_rows(all_obj_infos, filters)
                return list(filt_obj_infos.values()), marker
            # end filter_rows_no_anchor

            if count and not filters:
                # when listing all objects of a type
                # return early if only count query is in request
                return (True, sum(1 for col in cols), None)
            filtered_rows, ret_marker = filter_rows_no_anchor()
            children_fq_names_uuids.extend(filtered_rows)
    if count:
        return (True, len(children_fq_names_uuids), None)
    # for anchored list with pagination,
    # prune from union of anchors and last uuid is marker
    if paginate_start and anchored_op:
        children_fq_names_uuids = sorted(children_fq_names_uuids,
                                         key=lambda fqn_uuid: fqn_uuid[1])
        if len(children_fq_names_uuids) > paginate_count:
            children_fq_names_uuids = children_fq_names_uuids[:paginate_count]
        if not children_fq_names_uuids:
            ret_marker = None
        else:
            ret_marker = children_fq_names_uuids[-1][1]
    return (True, children_fq_names_uuids, ret_marker)
# end object_list
def object_delete(self, obj_type, obj_uuid):
    """Delete an object: parent link, refs, relaxed backrefs, the uuid
    row, the fqname-table entry and the naming cache entry.

    Returns (True, symmetric_ref_updates).
    """
    obj_class = self._get_resource_class(obj_type)
    fq_name = self._cassandra_driver.get_one_col(datastore_api.OBJ_UUID_CF_NAME,
                                                 obj_uuid,
                                                 'fq_name')
    bch = self._cassandra_driver.get_cf_batch(datastore_api.OBJ_UUID_CF_NAME)
    # unlink from parent
    col_start = 'parent:'
    col_fin = 'parent;'
    col_name_iter = self._cassandra_driver.xget(datastore_api.OBJ_UUID_CF_NAME,
                                                obj_uuid, start=col_start, finish=col_fin)
    for (col_name, col_val) in col_name_iter:
        (_, parent_type, parent_uuid) = col_name.split(':')
        self._delete_child(
            bch, parent_type, parent_uuid, obj_type, obj_uuid)
    # remove refs
    col_start = 'ref:'
    col_fin = 'ref;'
    col_name_iter = self._cassandra_driver.xget(datastore_api.OBJ_UUID_CF_NAME,
                                                obj_uuid, start=col_start, finish=col_fin)
    symmetric_ref_updates = []
    for (col_name, col_val) in col_name_iter:
        (_, ref_type, ref_uuid) = col_name.split(':')
        ret = self._delete_ref(bch, obj_type, obj_uuid, ref_type, ref_uuid)
        symmetric_ref_updates.extend(ret)
    # remove link from relaxed back refs
    col_start = 'relaxbackref:'
    col_fin = 'relaxbackref;'
    col_name_iter = self._cassandra_driver.xget(datastore_api.OBJ_UUID_CF_NAME,
                                                obj_uuid, start=col_start, finish=col_fin)
    for (col_name, col_val) in col_name_iter:
        (_, backref_uuid) = col_name.split(':')
        # deletes the ref column the relaxed referrer holds on us
        self._delete_ref(bch, None, backref_uuid, obj_type, obj_uuid)
    self._cassandra_driver.remove(obj_uuid, batch=bch)
    try:
        bch.send()
    finally:
        # evict even on failure so a stale cache entry cannot survive
        self._obj_cache_mgr.evict(obj_type, [obj_uuid])
    # Update fqname table
    fq_name_str = ':'.join(fq_name)
    fq_name_col = utils.encode_string(fq_name_str) + ':' + obj_uuid
    self._cassandra_driver.remove(
        cf_name=datastore_api.OBJ_FQ_NAME_CF_NAME,
        key=obj_type,
        columns=[fq_name_col])
    # Purge map naming cache
    self.cache_uuid_to_fq_name_del(obj_uuid)
    return (True, symmetric_ref_updates)
# end object_delete
def prop_collection_read(self, obj_type, obj_uuid, obj_fields, position):
    """Read list/map property elements of obj_uuid.

    obj_fields restricts which list/map properties are read (other
    field names are skipped); position, when given, selects a single
    element.  Returns (True, result) where each requested field maps to
    [(value, position), ...] and id_perms is always included for the
    upper layers' rbac/visibility checks.
    """
    obj_class = self._get_resource_class(obj_type)
    result = {}
    # always read-in id-perms for upper-layers to do rbac/visibility
    result['id_perms'] = self._cassandra_driver.get_one_col(
        datastore_api.OBJ_UUID_CF_NAME,
        obj_uuid,
        'prop:id_perms')
    # read in prop-list or prop-map fields
    for field in obj_fields:
        if field in obj_class.prop_list_fields:
            prop_pfx = 'propl'
        elif field in obj_class.prop_map_fields:
            prop_pfx = 'propm'
        else:
            continue
        if position:
            # exact element: start == finish pins a single column
            col_start = '%s:%s:%s' % (prop_pfx, field, position)
            col_end = '%s:%s:%s' % (prop_pfx, field, position)
        else:
            col_start = '%s:%s:' % (prop_pfx, field)
            col_end = '%s:%s;' % (prop_pfx, field)
        obj_cols = self._cassandra_driver.xget(
            datastore_api.OBJ_UUID_CF_NAME,
            obj_uuid,
            start=col_start,
            finish=col_end)
        result[field] = []
        for name, value in obj_cols:
            # tuple of col_value, position. result is already sorted
            # lexically by position (necessary only for list property)
            result[field].append((json.loads(value), name.split(':', 2)[-1]))
    return (True, result)
# end prop_collection_read
def cache_uuid_to_fq_name_add(self, id, fq_name, obj_type):
    """Record the (fq_name, obj_type) pair for *id* in the naming cache."""
    self._cache_uuid_to_fq_name.update({id: (fq_name, obj_type)})
# end cache_uuid_to_fq_name_add
def cache_uuid_to_fq_name_del(self, id):
    """Forget the cached naming entry for *id*; no-op when absent."""
    try:
        del self._cache_uuid_to_fq_name[id]
    except KeyError:
        pass
# end cache_uuid_to_fq_name_del
def uuid_to_fq_name(self, id):
    """Return a copy of the fq_name for *id*, caching DB lookups.

    Raises NoIdError when the uuid row is missing or incomplete.
    """
    cached = self._cache_uuid_to_fq_name.get(id)
    if cached is not None:
        # copy so callers cannot mutate the cached list in place
        return copy.copy(cached[0])
    obj = self._cassandra_driver.get(datastore_api.OBJ_UUID_CF_NAME, id,
                                     columns=['fq_name', 'type'])
    # a missing row, or a partially deleted one, both mean "no such id"
    if not obj or 'type' not in obj or 'fq_name' not in obj:
        raise NoIdError(id)
    fq_name = obj['fq_name']
    self.cache_uuid_to_fq_name_add(id, fq_name, obj['type'])
    return copy.copy(fq_name)
# end uuid_to_fq_name
def uuid_to_obj_type(self, id):
    """Return the object type for *id*, caching DB lookups.

    Raises NoIdError when the uuid row is missing or incomplete.
    """
    cached = self._cache_uuid_to_fq_name.get(id)
    if cached is not None:
        return cached[1]
    obj = self._cassandra_driver.get(datastore_api.OBJ_UUID_CF_NAME, id,
                                     columns=['fq_name', 'type'])
    # a missing row, or a partially deleted one, both mean "no such id"
    if not obj or 'type' not in obj or 'fq_name' not in obj:
        raise NoIdError(id)
    obj_type = obj['type']
    self.cache_uuid_to_fq_name_add(id, obj['fq_name'], obj_type)
    return obj_type
# end uuid_to_obj_type
def fq_name_to_uuid(self, obj_type, fq_name):
    """Resolve fq_name (a list of name components) to a uuid.

    Raises NoIdError when unknown and VncError on an ambiguous match.
    """
    fq_name_str = utils.encode_string(':'.join(fq_name))
    # ';' sorts right after ':', so this range captures exactly the
    # '<fq_name>:<uuid>' columns for this fq_name
    col_infos = self._cassandra_driver.get(datastore_api.OBJ_FQ_NAME_CF_NAME,
                                           obj_type,
                                           start=fq_name_str + ':',
                                           finish=fq_name_str + ';')
    if not col_infos:
        raise NoIdError('%s %s' % (obj_type, fq_name_str))
    if len(col_infos) > 1:
        raise VncError('Multi match %s for %s' % (fq_name_str, obj_type))
    parts = utils.decode_string(col_infos.popitem()[0]).split(':')
    # the fq_name embedded in the column must match exactly
    # (route_target names are exempt from this check)
    if obj_type != 'route_target' and parts[:-1] != fq_name:
        raise NoIdError('%s %s' % (obj_type, fq_name_str))
    return parts[-1]
# end fq_name_to_uuid
# return all objects shared with a (share_type, share_id)
def get_shared(self, obj_type, share_id='', share_type='global'):
    """Return [(uuid, rwx), ...] of obj_type objects shared with
    share_type:share_id, or None when nothing is shared."""
    column = '%s:%s' % (share_type, share_id)
    col_infos = self._cassandra_driver.get(datastore_api.OBJ_SHARED_CF_NAME,
                                           obj_type,
                                           start=column + ':',
                                           finish=column + ';')
    if not col_infos:
        return None
    # e.g. ('*:*:f7963198-08a4-4b96-a02e-41cc66593163', u'7')
    return [(col_name.split(':')[-1], col_val)
            for col_name, col_val in list(col_infos.items())]
# share an object 'obj_id' with <share_type:share_id>
# rwx indicate type of access (sharing) allowed
def set_shared(self, obj_type, obj_id, share_id='', share_type='global', rwx=7):
    """Grant share_type:share_id the access bits rwx on obj_id."""
    share_col = '%s:%s:%s' % (share_type, share_id, obj_id)
    self._cassandra_driver.insert(
        cf_name=datastore_api.OBJ_SHARED_CF_NAME,
        key=obj_type,
        columns={share_col: json.dumps(rwx)})
# delete share of 'obj_id' object with <share_type:share_id>
def del_shared(self, obj_type, obj_id, share_id='', share_type='global'):
    """Revoke the share of obj_id with share_type:share_id."""
    share_col = '%s:%s:%s' % (share_type, share_id, obj_id)
    self._cassandra_driver.remove(
        cf_name=datastore_api.OBJ_SHARED_CF_NAME,
        key=obj_type,
        columns=[share_col])
def _render_obj_from_db(self, obj_class, obj_rows, field_names=None,
                        include_backrefs_children=False):
    """Convert raw timestamped column rows into API object dicts.

    obj_rows maps uuid -> {col_name: (value, timestamp)}.  Returns
    {uuid: {'obj_dict': ..., 'id_perms_ts': ...}} with 'row_latest_ts'
    added only when backref/children columns were read.  Rows missing
    'type'/'fq_name' (partially deleted) or of another type are
    skipped.  field_names, when given, filters which fields are
    rendered.
    """
    ref_fields = obj_class.ref_fields
    backref_fields = obj_class.backref_fields
    children_fields = obj_class.children_fields
    list_fields = obj_class.prop_list_fields
    map_fields = obj_class.prop_map_fields
    prop_fields = obj_class.prop_fields - (list_fields | map_fields)
    results = {}
    for obj_uuid, obj_cols in list(obj_rows.items()):
        if 'type' not in obj_cols or 'fq_name' not in obj_cols:
            # if object has been deleted, these fields may not
            # be present
            continue
        if obj_class.object_type != obj_cols.pop('type')[0]:
            continue
        id_perms_ts = 0
        row_latest_ts = 0
        result = {}
        result['uuid'] = obj_uuid
        result['fq_name'] = obj_cols.pop('fq_name')[0]
        for col_name in list(obj_cols.keys()):
            if self._is_parent(col_name):
                # non config-root child
                (_, _, parent_uuid) = col_name.split(':')
                try:
                    result['parent_type'] = obj_cols['parent_type'][0]
                except KeyError:
                    # parent_type may not be present in obj_cols
                    pass
                result['parent_uuid'] = parent_uuid
                continue
            if self._is_prop(col_name):
                (_, prop_name) = col_name.split(':')
                if prop_name == 'id_perms':
                    # remember when any prop/ref last changed
                    id_perms_ts = obj_cols[col_name][1]
                if ((prop_name not in prop_fields) or
                        (field_names and prop_name not in field_names)):
                    continue
                result[prop_name] = obj_cols[col_name][0]
                continue
            if self._is_prop_list(col_name):
                (_, prop_name, prop_elem_position) = col_name.split(':')
                if field_names and prop_name not in field_names:
                    continue
                if obj_class.prop_list_field_has_wrappers[prop_name]:
                    # regenerate the wrapper dict stripped at write time
                    prop_field_types = obj_class.prop_field_types[prop_name]
                    wrapper_type = prop_field_types['xsd_type']
                    wrapper_cls = self._get_xsd_class(wrapper_type)
                    wrapper_field = wrapper_cls.attr_fields[0]
                    if prop_name not in result:
                        result[prop_name] = {wrapper_field: []}
                    result[prop_name][wrapper_field].append(
                        (obj_cols[col_name][0], prop_elem_position))
                else:
                    if prop_name not in result:
                        result[prop_name] = []
                    result[prop_name].append((obj_cols[col_name][0],
                                              prop_elem_position))
                continue
            if self._is_prop_map(col_name):
                (_, prop_name, _) = col_name.split(':', 2)
                if field_names and prop_name not in field_names:
                    continue
                if obj_class.prop_map_field_has_wrappers[prop_name]:
                    prop_field_types = obj_class.prop_field_types[prop_name]
                    wrapper_type = prop_field_types['xsd_type']
                    wrapper_cls = self._get_xsd_class(wrapper_type)
                    wrapper_field = wrapper_cls.attr_fields[0]
                    if prop_name not in result:
                        result[prop_name] = {wrapper_field: []}
                    result[prop_name][wrapper_field].append(
                        obj_cols[col_name][0])
                else:
                    if prop_name not in result:
                        result[prop_name] = []
                    result[prop_name].append(obj_cols[col_name][0])
                continue
            if self._is_children(col_name):
                (_, child_type, child_uuid) = col_name.split(':')
                if field_names and '%ss' % (child_type) not in field_names:
                    continue
                if child_type + 's' not in children_fields:
                    continue
                child_tstamp = obj_cols[col_name][1]
                try:
                    self._read_child(result, obj_uuid, child_type,
                                     child_uuid, child_tstamp)
                except NoIdError:
                    # child vanished between scan and resolve; skip it
                    continue
                continue
            if self._is_ref(col_name):
                (_, ref_type, ref_uuid) = col_name.split(':')
                if ((ref_type + '_refs' not in ref_fields) or
                        (field_names and ref_type + '_refs' not in field_names)):
                    continue
                self._read_ref(result, obj_uuid, ref_type, ref_uuid,
                               obj_cols[col_name][0])
                continue
            if self._is_backref(col_name):
                (_, back_ref_type, back_ref_uuid) = col_name.split(':')
                if back_ref_type + '_back_refs' not in backref_fields:
                    continue
                if (field_names and
                        '%s_back_refs' % (back_ref_type) not in field_names):
                    continue
                try:
                    self._read_back_ref(result, obj_uuid, back_ref_type,
                                        back_ref_uuid, obj_cols[col_name][0])
                except NoIdError:
                    continue
                continue
            if self._is_metadata(col_name):
                (_, meta_type) = col_name.split(':')
                if meta_type == 'latest_col_ts':
                    row_latest_ts = obj_cols[col_name][1]
                continue
        # for all column names
        # sort children by creation time
        for child_field in obj_class.children_fields:
            if child_field not in result:
                continue
            sorted_children = sorted(result[child_field],
                                     key=itemgetter('tstamp'))
            # re-write result's children without timestamp
            result[child_field] = sorted_children
            [child.pop('tstamp') for child in result[child_field]]
        # for all children
        # Ordering property lists by position attribute
        for prop_name in (obj_class.prop_list_fields & set(result.keys())):
            if isinstance(result[prop_name], list):
                result[prop_name] = [el[0] for el in
                                     sorted(result[prop_name],
                                            key=itemgetter(1))]
            elif isinstance(result[prop_name], dict):
                wrapper, unsorted_list = result[prop_name].popitem()
                result[prop_name][wrapper] = [el[0] for el in
                                              sorted(unsorted_list,
                                                     key=itemgetter(1))]
        # 'id_perms_ts' tracks timestamp of id-perms column
        # i.e. latest update of *any* prop or ref.
        # 'row_latest_ts' tracks timestamp of last modified column
        # so any backref/children column is also captured. 0=>unknown
        results[obj_uuid] = {'obj_dict': result,
                             'id_perms_ts': id_perms_ts}
        if include_backrefs_children:
            # update our copy of ts only if we read the
            # corresponding fields from db
            results[obj_uuid]['row_latest_ts'] = row_latest_ts
    # end for all rows
    return results
# end _render_obj_from_db
def _read_child(self, result, obj_uuid, child_obj_type, child_uuid,
child_tstamp):
if '%ss' % (child_obj_type) not in result:
result['%ss' % (child_obj_type)] = []
child_res_type = self._get_resource_class(child_obj_type).resource_type
child_info = {}
child_info['to'] = self.uuid_to_fq_name(child_uuid)
child_info['uuid'] = child_uuid
child_info['tstamp'] = child_tstamp
result['%ss' % (child_obj_type)].append(child_info)
# end _read_child
def _read_ref(self, result, obj_uuid, ref_obj_type, ref_uuid, ref_data_json):
if '%s_refs' % (ref_obj_type) not in result:
result['%s_refs' % (ref_obj_type)] = []
ref_data = ref_data_json
ref_info = {}
try:
ref_info['to'] = self.uuid_to_fq_name(ref_uuid)
except NoIdError:
ref_info['to'] = ['ERROR']
if ref_data:
try:
ref_info['attr'] = ref_data['attr']
except KeyError:
# TODO remove backward compat old format had attr directly
ref_info['attr'] = ref_data
ref_info['uuid'] = ref_uuid
result['%s_refs' % (ref_obj_type)].append(ref_info)
# end _read_ref
def _read_back_ref(self, result, obj_uuid, back_ref_obj_type, back_ref_uuid,
back_ref_data_json):
if '%s_back_refs' % (back_ref_obj_type) not in result:
result['%s_back_refs' % (back_ref_obj_type)] = []
back_ref_info = {}
back_ref_info['to'] = self.uuid_to_fq_name(back_ref_uuid)
back_ref_data = back_ref_data_json
if back_ref_data:
try:
back_ref_info['attr'] = back_ref_data['attr']
except KeyError:
# TODO remove backward compat old format had attr directly
back_ref_info['attr'] = back_ref_data
back_ref_info['uuid'] = back_ref_uuid
result['%s_back_refs' % (back_ref_obj_type)].append(back_ref_info)
# end _read_back_ref
def walk(self, fn=None):
type_to_object = {}
for obj_uuid, obj_col in self._cassandra_driver.get_range(
datastore_api.OBJ_UUID_CF_NAME,
columns=['type', 'fq_name']):
try:
obj_type = json.loads(obj_col['type'])
obj_fq_name = json.loads(obj_col['fq_name'])
# prep cache to avoid n/w round-trip in db.read for ref
self.cache_uuid_to_fq_name_add(obj_uuid, obj_fq_name, obj_type)
try:
type_to_object[obj_type].append(obj_uuid)
except KeyError:
type_to_object[obj_type] = [obj_uuid]
except Exception as e:
self._logger('Error in db walk read %s' % (str(e)),
level=SandeshLevel.SYS_ERR)
continue
if fn is None:
return []
walk_results = []
for obj_type, uuid_list in list(type_to_object.items()):
try:
self._logger('DB walk: obj_type %s len %s'
% (obj_type, len(uuid_list)),
level=SandeshLevel.SYS_INFO)
result = fn(obj_type, uuid_list)
if result:
walk_results.append(result)
except Exception as e:
self._logger('Error in db walk invoke %s' % (str(e)),
level=SandeshLevel.SYS_ERR)
continue
return walk_results
# end walk
# end class VncCassandraClient
class ObjectCacheManager(object):
    """Bounded cache of rendered object dicts keyed by uuid.

    Entries are inserted in access order into an OrderedDict; when the
    cache is full the oldest entry is evicted. Hits are validated against
    a DB timestamp column before being returned (see read()), so stale
    entries are detected and evicted.
    """

    class CachedObject(object):
        # provide a read-only copy in so far as
        # top level keys cannot be add/mod/del
        class RODict(dict):
            def __readonly__(self, *args, **kwargs):
                raise RuntimeError("Cannot modify ReadOnlyDict")
            # block every top-level mutator
            __setitem__ = __readonly__
            __delitem__ = __readonly__
            pop = __readonly__
            popitem = __readonly__
            clear = __readonly__
            update = __readonly__
            setdefault = __readonly__
            del __readonly__
        # end RODict

        def __init__(self, obj_dict, id_perms_ts, row_latest_ts):
            # id_perms_ts: tstamp of the prop:id_perms column, i.e. latest
            #     update of any prop or ref
            # row_latest_ts: tstamp of the META:latest_col_ts column, i.e.
            #     latest update of any column (incl. backref/children);
            #     0 => unknown
            self.obj_dict = self.RODict(obj_dict)
            self.id_perms_ts = id_perms_ts
            self.row_latest_ts = row_latest_ts
        # end __init__

        def update_obj_dict(self, new_obj_dict):
            self.obj_dict = self.RODict(new_obj_dict)
        # end update_obj_dict

        def get_filtered_copy(self, field_names=None):
            """Return the cached (read-only) dict, or a deep-copied subset
            restricted to field_names so callers may mutate it."""
            if not field_names:
                return self.obj_dict
            # TODO filter with field_names
            return {k: copy.deepcopy(self.obj_dict[k])
                    for k in field_names if k in self.obj_dict}
        # end get_filtered_copy

    # end class CachedObject

    def __init__(self, logger, db_client, max_entries,
                 obj_cache_exclude_types=None, debug_obj_cache_types=None):
        self._logger = logger
        self.max_entries = max_entries
        self._db_client = db_client
        # insertion order of the OrderedDict doubles as eviction order
        self._cache = OrderedDict()
        self._obj_cache_exclude_types = set(obj_cache_exclude_types or [])
        # debug logging makes no sense for types that are never cached
        self._debug_obj_cache_types = set(debug_obj_cache_types or [])
        self._debug_obj_cache_types -= self._obj_cache_exclude_types
    # end __init__

    def _log(self, msg, level=SandeshLevel.SYS_DEBUG):
        msg = 'Object UUID cache manager: %s' % msg
        self._logger(msg, level)

    def evict(self, obj_type, obj_uuids):
        """Drop the given uuids from the cache; uuids not present are
        silently skipped."""
        for obj_uuid in obj_uuids:
            try:
                obj_dict = self._cache.pop(obj_uuid).obj_dict
                if obj_type in self._debug_obj_cache_types:
                    self._log("%s %s (%s) was evicted from cache. Cache "
                              "contained: %s" % (
                                  obj_type.replace('_', '-').title(),
                                  ':'.join(obj_dict['fq_name']),
                                  obj_uuid,
                                  pformat(obj_dict),
                              ),
                              )
            except KeyError:
                continue
    # end evict

    def set(self, obj_type, db_rendered_objs, req_fields,
            include_backrefs_children):
        """Insert/refresh cache entries from freshly rendered DB objects
        and return their (field-filtered) obj dicts.

        Existing entries are updated in place; new entries may trigger an
        eviction of the oldest cache entry when the cache is full.
        """
        # build up results with field filter
        result_obj_dicts = []
        if req_fields:
            result_fields = set(req_fields) | set(['fq_name', 'uuid',
                                                   'parent_type', 'parent_uuid'])

        for obj_uuid, render_info in list(db_rendered_objs.items()):
            id_perms_ts = render_info.get('id_perms_ts', 0)
            row_latest_ts = render_info.get('row_latest_ts', 0)
            cached_obj = self._cache.pop(obj_uuid, None)
            if cached_obj is not None:
                # if we had stale, just update from new db value
                cached_obj.update_obj_dict(render_info['obj_dict'])
                cached_obj.id_perms_ts = id_perms_ts
                if include_backrefs_children:
                    cached_obj.row_latest_ts = row_latest_ts
            else:
                # this was a miss in cache
                cached_obj = self.CachedObject(
                    render_info['obj_dict'],
                    id_perms_ts,
                    row_latest_ts,
                )
                if len(self._cache) >= self.max_entries:
                    # evict the oldest (first-inserted) entry.
                    # next(iter(...)) yields the first key in O(1) without
                    # materializing the full key list (the previous
                    # list(iterkeys())/list(keys()) dance copied every key).
                    self.evict(obj_type, [next(iter(self._cache))])

            self._cache[obj_uuid] = cached_obj
            if obj_type in self._debug_obj_cache_types:
                self._log("%s %s (%s) was set in cache with values: %s" % (
                    obj_type.replace('_', ' ').title(),
                    ':'.join(cached_obj.obj_dict['fq_name']),
                    obj_uuid,
                    pformat(cached_obj.obj_dict),
                ),
                )
            if req_fields:
                result_obj_dicts.append(
                    cached_obj.get_filtered_copy(result_fields))
            else:
                result_obj_dicts.append(cached_obj.get_filtered_copy())
        # end for all rendered objects

        return result_obj_dicts
    # end set

    def read(self, obj_class, obj_uuids, req_fields, include_backrefs_children):
        """Return (obj_dicts, miss_uuids).

        obj_dicts holds the cached dicts for fresh hits; miss_uuids holds
        uuids that were absent or whose cached timestamp no longer matches
        the DB (stale hits are also evicted from the cache).
        """
        # find which keys are a hit, find which hit keys are not stale
        # return hit entries and miss+stale uuids.
        hit_uuids = []
        miss_uuids = []
        for obj_uuid in obj_uuids:
            if obj_uuid in self._cache:
                hit_uuids.append(obj_uuid)
            else:
                miss_uuids.append(obj_uuid)

        stale_uuids = []

        # staleness when include_backrefs_children is False = id_perms tstamp
        #           when include_backrefs_children is True = latest_col_ts tstamp
        if include_backrefs_children:
            stale_check_col_name = 'META:latest_col_ts'
            stale_check_ts_attr = 'row_latest_ts'
        else:
            stale_check_col_name = 'prop:id_perms'
            stale_check_ts_attr = 'id_perms_ts'

        hit_rows_in_db = self._db_client._cassandra_driver.multiget(
            datastore_api.OBJ_UUID_CF_NAME, hit_uuids,
            columns=[stale_check_col_name], timestamp=True)

        obj_dicts = []
        result_fields = {'fq_name', 'uuid', 'parent_type', 'parent_uuid'}
        if req_fields:
            result_fields = set(req_fields) | result_fields

        for hit_uuid in hit_uuids:
            try:
                obj_cols = hit_rows_in_db[hit_uuid]
                cached_obj = self._cache[hit_uuid]
            except KeyError:
                # Either stale check column missing, treat as miss
                # Or entry could have been evicted while context switched
                # for reading stale-check-col, treat as miss
                miss_uuids.append(hit_uuid)
                continue

            if (getattr(cached_obj, stale_check_ts_attr) !=
                    obj_cols[stale_check_col_name][1]):
                miss_uuids.append(hit_uuid)
                stale_uuids.append(hit_uuid)
                continue

            if req_fields:
                obj_dicts.append(cached_obj.get_filtered_copy(result_fields))
            else:
                obj_dicts.append(cached_obj.get_filtered_copy())

            if obj_class.object_type in self._debug_obj_cache_types:
                # expensive: re-read + re-render from DB purely for the
                # side-by-side debug log below
                obj_rows = self._db_client._cassandra_driver.multiget(
                    datastore_api.OBJ_UUID_CF_NAME,
                    [hit_uuid],
                    timestamp=True)
                rendered_objs = self._db_client._render_obj_from_db(
                    obj_class, obj_rows, req_fields, include_backrefs_children)
                db_obj_dict = rendered_objs[hit_uuid]['obj_dict']
                self._log("%s %s (%s) was read from cache.\nDB values: %s\n"
                          "Cache value: %s\n" % (
                              obj_class.object_type.replace('_', ' ').title(),
                              ':'.join(cached_obj.obj_dict['fq_name']),
                              hit_uuid,
                              pformat(db_obj_dict),
                              pformat(cached_obj.obj_dict),
                          ),
                          )
        # end for all hit in cache

        self.evict(obj_class.object_type, stale_uuids)
        return obj_dicts, miss_uuids
    # end read

    @staticmethod
    def _serialize_cached_obj(cached_obj):
        # round-trip through json so a CachedObject (incl. its RODict)
        # becomes plain json-compatible dicts
        obj_json = json.dumps(
            cached_obj,
            default=lambda o: dict((k, v)
                                   for k, v in list(o.__dict__.items())))
        return json.loads(obj_json)

    def dump_cache(self, obj_uuids=None, count=10):
        """Debug helper: return cache entries as plain dicts keyed 1..N.

        With obj_uuids, dump exactly those entries (missing uuids are
        skipped, count is ignored); otherwise dump up to `count` entries
        in cache order.
        """
        obj_dicts = {}
        i = 1
        if obj_uuids:
            for obj_uuid in obj_uuids:
                try:
                    obj = self._cache[obj_uuid]
                except KeyError:
                    continue
                obj_dicts[i] = self._serialize_cached_obj(obj)
                i += 1
        else:
            for key in self._cache:
                if i > count:
                    break
                obj_dicts[i] = self._serialize_cached_obj(self._cache[key])
                i += 1
        return obj_dicts
# end class ObjectCacheManager
| src/config/common/cfgm_common/vnc_cassandra.py | 73,932 | Copyright (c) 2014 Juniper Networks, Inc. All rights reserved. end _get_resource_class end get_db_info TODO(sahid): To satisfy test-framework which has its specific py3 support for thrift we can have the above condition, when that will be fixed we could uncomment the code.if six.PY3: raise VncError( "selected driver `{}` not supported for Python 3.".format( cassandra_driver)) these functions make calls to pycassa xget() and get_range() generator functions which can't be wrapped around handle_exceptions() at the time of cassandra init, hence need to wrap these functions that uses it to catch cassandra connection failures. end __init__ end _create_prop prop has been accounted for, remove so only new ones remain end _update_prop end _add_to_prop_list end _delete_from_prop_list end _set_in_prop_map end _delete_from_prop_map update latest_col_ts on parent object end _create_child update latest_col_ts on parent object end _delete_child update latest_col_ts on referred object evict other side of ref since it is stale from GET /<old-ref-uuid> pov. end _create_ref update body didn't touch this type, nop remove old ref old_ref_uuid might have been deleted if cache has the link, it will be evicted if cache doesn't have, keyerror is caught and continued retain old ref with new ref attr uuid has been accounted for, remove so only new ones remain update latest_col_ts on referred object evict other side of ref since it is stale from GET /<old-ref-uuid> pov. end _update_ref ref_uuid might have been deleted if cache has the link, it will be evicted if cache doesn't have, keyerror is caught and continued update latest_col_ts on referred object evict other side of ref since it is stale from GET /<old-ref-uuid> pov. 
end _delete_ref end _get_xsd_class Gather column values for obj and updates to backrefs in a batch and write it at the end non config-root child Properties Specifically checking for None store list elements in list order iterate on wrapped element or directly or prop field iterate on wrapped element or directly or prop field References e.g. ref_field = 'network_ipam_refs' ref_res_type = 'network-ipam' ref_link_type = 'VnSubnetsType' is_weakref = False Update fqname table end object_create strip 'prop:' before sending result back if field_names=None, all fields will be read/returned optimize for common case of reading non-backref, non-children fields ignoring columns starting from 'b' and 'c' - significant performance impact in scaled setting. e.g. read of project For caching (when ret values will be used for readonly e.g. object read/list context): 1. pick the hits, and for the misses.. 2. read from db, cache, filter with fields else read from db with specified field filters atleast one backref/children field is needed ignore reading backref + children columns caller may modify returned value, or cannot fit in cache, just render with filter and don't cache can fit and caller won't modify returned value, so render without filter, cache and return cached value end object_read end object_count_children end update_last_modified end update_latest_col_ts Grab ref-uuids and properties in new version Properties References e.g. 
ref_field = 'network_ipam_refs' ref_type = 'network-ipam' ref_link_type = 'VnSubnetsType' is_weakref = False Gather column values for obj and updates to backrefs in a batch and write it at the end id-perms always has to be updated for last-mod timestamp get it from request dict(or from db if not in request dict) delete all old values of prop list delete all old values of prop list for all column names create new refs create new props store list elements in list order iterate on wrapped element or directly on prop field for wrapped lists, store without the wrapper. regenerate wrapper on read store map elements in key order iterate on wrapped element or directly on prop field for wrapped lists, store without the wrapper. regenerate wrapper on read end object_update give chance for zk heartbeat/ping end filter_rows end get_fq_name_uuid_list go from parent to child tune start and count if paginated on same rowif paginate_start and (len(parent_uuids) == 1): flatten to [('children:<type>:<uuid>', (<val>,<ts>), *] give chance for zk heartbeat/ping end filter_rows_parent_anchor go from anchor to backrefs get next lexical value of marker flatten to [('backref:<obj-type>:<uuid>', (<val>,<ts>), *] give chance for zk heartbeat/ping end filter_rows_backref_anchor exact objects specified paginate through objects in list order of obj_uuids simulate end of pagination end filter_rows_object_list grab all resources of this type give chance for zk heartbeat/ping end filter_rows_no_anchor when listing all objects of a type return early if only count query is in request for anchored list with pagination, prune from union of anchors and last uuid is marker end object_list unlink from parent remove refs remove link from relaxed back refs Update fqname table Purge map naming cache end object_delete always read-in id-perms for upper-layers to do rbac/visibility read in prop-list or prop-map fields tuple of col_value, position. 
result is already sorted lexically by position (necessary only for list property) end prop_collection_read end cache_uuid_to_fq_name_add end cache_uuid_to_fq_name_del end uuid_to_fq_name end uuid_to_obj_type end fq_name_to_uuid return all objects shared with a (share_type, share_id) ('*:*:f7963198-08a4-4b96-a02e-41cc66593163', u'7') share an object 'obj_id' with <share_type:share_id> rwx indicate type of access (sharing) allowed delete share of 'obj_id' object with <share_type:share_id> if object has been deleted, these fields may not be present non config-root child parent_type may not be present in obj_cols for all column names sort children by creation time re-write result's children without timestamp for all children Ordering property lists by position attribute 'id_perms_ts' tracks timestamp of id-perms column i.e. latest update of *any* prop or ref. 'row_latest_ts' tracks timestamp of last modified column so any backref/children column is also captured. 0=>unknown update our copy of ts only if we read the corresponding fields from db end for all rows end _render_obj_from_db end _read_child TODO remove backward compat old format had attr directly end _read_ref TODO remove backward compat old format had attr directly end _read_back_ref prep cache to avoid n/w round-trip in db.read for ref end walk end class VncCassandraClient provide a read-only copy in so far as top level keys cannot be add/mod/del end RODict end __init__ end update_obj_dict TODO filter with field_names end get_filtered_copy end class CachedObject end __init__ end evict build up results with field filter if we had stale, just update from new db value this was a miss in cache get first element (least recently used) without getting full copy of dict keys 'keys()' returns an iterator with PY3. end for all rendered objects end set find which keys are a hit, find which hit keys are not stale return hit entries and miss+stale uuids. 
staleness when include_backrefs_children is False = id_perms tstamp when include_backrefs_children is True = latest_col_ts tstamp Either stale check column missing, treat as miss Or entry could have been evicted while context switched for reading stale-check-col, treat as miss end for all hit in cache end read end class ObjectCacheManager | 7,442 | en | 0.738152 |
import django
import six
from django.http import HttpResponseRedirect
if django.VERSION[0] < 2:
from django.core.urlresolvers import reverse
else:
from django.urls import reverse
from django.db import transaction
from django.utils import timezone
import logging
from processlib.assignment import inherit
from processlib.tasks import run_async_activity
logger = logging.getLogger(__name__)
@six.python_2_unicode_compatible
class Activity(object):
    """Base class for a single step in a process flow.

    Pairs the flow-level definition (name, permission settings,
    assignment and skip rules) with a persisted activity model
    ``instance`` for a concrete ``process``. The lifecycle goes
    instantiate -> start -> finish, with cancel/undo/error as
    alternative transitions.
    """

    def __init__(
        self,
        flow,
        process,
        instance,
        name,
        verbose_name=None,
        permission=None,
        auto_create_permission=True,
        permission_name=None,
        skip_if=None,
        assign_to=inherit,
    ):
        self.flow = flow
        self.process = process
        self.verbose_name = verbose_name
        self.permission = permission
        self.auto_create_permission = auto_create_permission
        # fall back to the verbose name, then the technical name
        self.permission_name = permission_name or verbose_name or name
        self.name = name
        self.instance = instance
        # ensure that we have a single referenced process object
        if self.instance:
            self.instance.process = self.process
        # skip_if: optional predicate receiving this activity
        self._skip = skip_if
        # assign_to: callable resolving (user, group) for new instances
        self._get_assignment = assign_to

    def should_skip(self):
        """Return True if the skip_if predicate rejects this activity."""
        if not self._skip:
            return False
        return self._skip(self)

    def should_wait(self):
        return False

    def has_view(self):
        """Whether this activity is handled through a view (see dispatch
        on view-based subclasses)."""
        return False

    def __str__(self):
        return six.text_type(self.verbose_name or self.name)

    def __repr__(self):
        return '{}(name="{}")'.format(self.__class__.__name__, self.name)

    def instantiate(
        self, predecessor=None, instance_kwargs=None, request=None, **kwargs
    ):
        """Create and persist the activity model instance, resolving the
        user/group assignment (from the request user and/or predecessor)
        unless explicitly given in instance_kwargs."""
        assert not self.instance
        instance_kwargs = instance_kwargs or {}
        request_user = (
            request.user if request and request.user.is_authenticated else None
        )
        user, group = self._get_assignment(
            request_user=request_user, predecessor=predecessor
        )
        if "assigned_user" not in instance_kwargs:
            instance_kwargs["assigned_user"] = user
        if "assigned_group" not in instance_kwargs:
            instance_kwargs["assigned_group"] = group
        self.instance = self.flow.activity_model(
            process=self.process, activity_name=self.name, **(instance_kwargs or {})
        )
        # save before touching the predecessors m2m below
        self.instance.save()
        if predecessor:
            self.instance.predecessors.add(predecessor.instance)

    def assign_to(self, user, group):
        """Reassign the persisted instance to the given user/group."""
        self.instance.assigned_user = user
        self.instance.assigned_group = group
        self.instance.save()

    def start(self, **kwargs):
        """Move the instance to STARTED; note the instance is NOT saved
        here -- finish()/error() persist the final state."""
        assert self.instance.status in (
            self.instance.STATUS_INSTANTIATED,
            self.instance.STATUS_SCHEDULED,
        )
        if not self.instance.started_at:
            self.instance.started_at = timezone.now()
        self.instance.status = self.instance.STATUS_STARTED

    def finish(self, **kwargs):
        """Mark the instance DONE, persist it and instantiate the
        follow-up activities of the flow."""
        assert self.instance.status == self.instance.STATUS_STARTED
        if not self.instance.finished_at:
            self.instance.finished_at = timezone.now()
        self.instance.status = self.instance.STATUS_DONE
        self.instance.modified_by = kwargs.get("user", None)
        self.instance.save()
        self._instantiate_next_activities()

    def cancel(self, **kwargs):
        """Cancel an instance that has not successfully run yet."""
        assert self.instance.status in (
            self.instance.STATUS_INSTANTIATED,
            self.instance.STATUS_ERROR,
        )
        self.instance.status = self.instance.STATUS_CANCELED
        self.instance.modified_by = kwargs.get("user", None)
        self.instance.save()

    def undo(self, **kwargs):
        """Revert a DONE instance back to INSTANTIATED, invoking an
        optional ``undo_<name>`` hook on the process."""
        assert self.instance.status == self.instance.STATUS_DONE
        self.instance.finished_at = None
        self.instance.status = self.instance.STATUS_INSTANTIATED
        self.instance.modified_by = kwargs.get("user", None)
        self.instance.save()
        undo_callback = getattr(self.process, "undo_{}".format(self.name), None)
        if undo_callback is not None:
            undo_callback()

    def error(self, **kwargs):
        """Mark a not-yet-done instance as failed and persist it."""
        assert self.instance.status != self.instance.STATUS_DONE
        self.instance.status = self.instance.STATUS_ERROR
        self.instance.finished_at = timezone.now()
        self.instance.modified_by = kwargs.get("user", None)
        self.instance.save()

    def _get_next_activities(self):
        """Yield the successor activities per the flow's edges,
        transparently skipping (and recursing past) activities whose
        should_skip() is True."""
        for activity_name in self.flow._out_edges[self.name]:
            activity = self.flow._get_activity_by_name(
                process=self.process, activity_name=activity_name
            )
            if activity.should_skip():
                for later_activity in activity._get_next_activities():
                    yield later_activity
            else:
                yield activity

    def _instantiate_next_activities(self):
        # each successor records this activity as its predecessor
        for activity in self._get_next_activities():
            activity.instantiate(predecessor=self)
class State(Activity):
    """A marker activity recording that a certain state was reached
    (e.g. after a conditional predecessor); it completes immediately
    upon instantiation.
    """

    def instantiate(self, **kwargs):
        # create the instance, then run it to completion right away
        super(State, self).instantiate(**kwargs)
        self.start()
        self.finish()
class ViewActivity(Activity):
    """An activity completed by a user interacting with a configured view.

    The request is routed to the view via dispatch(), which injects this
    activity into the view kwargs.
    """

    def __init__(self, view=None, **kwargs):
        super(ViewActivity, self).__init__(**kwargs)
        if view is None:
            # message fixed: "none given" (was the typo "non given")
            raise ValueError(
                "A ViewActivity requires a view, none given for {}.{}".format(
                    self.flow.label, self.name
                )
            )
        self.view = view

    def has_view(self):
        return True

    def get_absolute_url(self):
        """URL of the generic process-activity dispatcher for this instance."""
        return reverse(
            "processlib:process-activity",
            kwargs={"flow_label": self.flow.label, "activity_id": self.instance.pk},
        )

    def dispatch(self, request, *args, **kwargs):
        # hand the request to the configured view, exposing this activity
        kwargs["activity"] = self
        return self.view(request, *args, **kwargs)
class FunctionActivity(Activity):
    """An activity that runs a synchronous callback as soon as it is
    instantiated; success finishes it, an exception marks it errored."""

    def __init__(self, callback=None, **kwargs):
        self.callback = callback
        super(FunctionActivity, self).__init__(**kwargs)

    def instantiate(self, **kwargs):
        super(FunctionActivity, self).instantiate(**kwargs)
        # no user interaction needed -- run immediately
        self.start()

    def start(self, **kwargs):
        super(FunctionActivity, self).start(**kwargs)
        try:
            self.callback(self)
        except Exception as e:
            logger.exception(e)
            self.error(exception=e)
        else:
            self.finish()

    def retry(self):
        """Re-run a failed activity by resetting it and starting again."""
        assert self.instance.status == self.instance.STATUS_ERROR
        self.instance.status = self.instance.STATUS_INSTANTIATED
        self.instance.finished_at = None
        self.instance.save()
        self.start()
class AsyncActivity(Activity):
    """An activity whose callback is executed out of process by the
    run_async_activity background task."""

    def __init__(self, callback=None, **kwargs):
        self.callback = callback
        super(AsyncActivity, self).__init__(**kwargs)

    def instantiate(self, **kwargs):
        super(AsyncActivity, self).instantiate(**kwargs)
        self.schedule()

    def schedule(self, **kwargs):
        """Persist the SCHEDULED state and enqueue the background task."""
        self.instance.scheduled_at = timezone.now()
        self.instance.status = self.instance.STATUS_SCHEDULED
        self.instance.save()
        # only enqueue after the surrounding transaction commits, so the
        # worker is guaranteed to see the persisted instance
        transaction.on_commit(
            lambda: run_async_activity.delay(self.flow.label, self.instance.pk)
        )

    def retry(self, **kwargs):
        """Reset a failed activity and schedule it again."""
        assert self.instance.status == self.instance.STATUS_ERROR
        self.instance.status = self.instance.STATUS_INSTANTIATED
        self.instance.finished_at = None
        self.schedule(**kwargs)

    def start(self, **kwargs):
        # invoked from the background task
        super(AsyncActivity, self).start(**kwargs)
        self.callback(self)
class AsyncViewActivity(AsyncActivity):
    """
    An async activity that renders a view while the async task is running.
    The view could be AsyncActivityView with a custom template_name
    """

    def __init__(self, view=None, **kwargs):
        super(AsyncViewActivity, self).__init__(**kwargs)
        if view is None:
            # message fixed: "none given" (was the typo "non given")
            raise ValueError(
                "An AsyncViewActivity requires a view, none given for {}.{}".format(
                    self.flow.label, self.name
                )
            )
        self.view = view

    def has_view(self):
        return True

    def get_absolute_url(self):
        """URL of the generic process-activity dispatcher for this instance."""
        return reverse(
            "processlib:process-activity",
            kwargs={"flow_label": self.flow.label, "activity_id": self.instance.pk},
        )

    def dispatch(self, request, *args, **kwargs):
        # hand the request to the configured view, exposing this activity
        kwargs["activity"] = self
        return self.view(request, *args, **kwargs)
class StartMixin(Activity):
    """Mixin for the first activity of a flow.

    instantiate() must not receive a predecessor and deliberately does not
    persist the activity yet; finish() saves the process first so the
    activity instance can reference it.
    """

    def instantiate(
        self, predecessor=None, instance_kwargs=None, request=None, **kwargs
    ):
        assert not self.instance
        assert not predecessor
        instance_kwargs = instance_kwargs or {}
        if request and request.user.is_authenticated:
            request_user = request.user
        else:
            request_user = None
        user, group = self._get_assignment(
            request_user=request_user, predecessor=predecessor
        )
        instance_kwargs.setdefault("assigned_user", user)
        instance_kwargs.setdefault("assigned_group", group)
        # not saved yet: the process itself may not be persisted until
        # finish() runs
        self.instance = self.flow.activity_model(
            process=self.process, activity_name=self.name, **instance_kwargs
        )

    def finish(self, **kwargs):
        assert self.instance.status == self.instance.STATUS_STARTED
        if not self.instance.finished_at:
            self.instance.finished_at = timezone.now()
        # persist the process first so the instance can point at it
        self.process.save()
        self.instance.process = self.process
        self.instance.status = self.instance.STATUS_DONE
        self.instance.modified_by = kwargs.get("user", None)
        self.instance.save()
        self._instantiate_next_activities()
class StartActivity(StartMixin, Activity):
    """Entry point of a flow with no view of its own."""
    pass


class StartViewActivity(StartMixin, ViewActivity):
    """Entry point of a flow that is started through a view."""
    pass
class EndActivity(Activity):
    """Final activity of a flow: completes itself immediately and marks
    the whole process as done."""

    def instantiate(self, **kwargs):
        # an end activity has nothing to do -- run through right away
        super(EndActivity, self).instantiate(**kwargs)
        self.start()
        self.finish()

    def finish(self, **kwargs):
        super(EndActivity, self).finish(**kwargs)
        update_fields = []
        if not self.process.finished_at:
            # reuse the activity's finish time as the process finish time
            self.process.finished_at = self.instance.finished_at
            update_fields.append("finished_at")
        if self.process.status != self.process.STATUS_DONE:
            self.process.status = self.process.STATUS_DONE
            update_fields.append("status")
        self.process.save(update_fields=update_fields)
class EndRedirectActivity(EndActivity):
    """An end activity that redirects the user to a configurable URL once
    the process is finished."""

    def __init__(self, redirect_url_callback=None, **kwargs):
        self.redirect_url_callback = redirect_url_callback
        # bug fix: previously anchored super() at EndActivity, which would
        # silently skip EndActivity.__init__ in the MRO if one were added
        super(EndRedirectActivity, self).__init__(**kwargs)

    def instantiate(self, **kwargs):
        # HACK: we skip the EndActivity implementation
        # because it would finish the activity right away
        super(EndActivity, self).instantiate(**kwargs)

    def has_view(self):
        return True

    def get_absolute_url(self):
        """URL of the generic process-activity dispatcher for this instance."""
        return reverse(
            "processlib:process-activity",
            kwargs={"flow_label": self.flow.label, "activity_id": self.instance.pk},
        )

    def dispatch(self, request, *args, **kwargs):
        self.start()
        # default target if no callback is configured or it fails
        url = reverse(
            "processlib:process-detail", kwargs={"pk": self.instance.process.pk}
        )
        try:
            if self.redirect_url_callback:
                url = self.redirect_url_callback(self)
            self.finish()
        except Exception as e:
            logger.exception(e)
            self.error(exception=e)
        return HttpResponseRedirect(url)
class FormActivity(Activity):
    """An activity backed by a form class."""

    def __init__(self, form_class=None, **kwargs):
        self.form_class = form_class
        super(FormActivity, self).__init__(**kwargs)

    def get_form(self, **kwargs):
        """Instantiate the configured form class with the given kwargs."""
        return self.form_class(**kwargs)
class StartFormActivity(StartMixin, FormActivity):
    """Entry point of a flow combined with form handling."""
    pass
class IfElse(Activity):
    """Named placeholder for a branching point in a flow.

    Adds no behavior over Activity; the previous __init__ override only
    forwarded its arguments unchanged and has been removed.
    """
class Wait(Activity):
    """An activity that joins several predecessor branches: it only
    finishes once every activity named in ``wait_for`` appears among its
    predecessors."""

    def __init__(self, flow, process, instance, name, **kwargs):
        wait_for = kwargs.pop("wait_for", None)
        if not wait_for:
            raise ValueError("Wait activity needs to wait for something.")
        super(Wait, self).__init__(flow, process, instance, name, **kwargs)
        # names of activities that must complete before this one finishes
        self._wait_for = set(wait_for) if wait_for else None

    def _find_existing_instance(self, predecessor):
        """Return an existing, still-joinable instance of this wait
        activity, or raise DoesNotExist so a new one gets created."""
        candidates = list(
            self.flow.activity_model.objects.filter(
                process=self.process, activity_name=self.name
            )
        )
        for candidate in candidates:
            # FIXME this only corrects for simple loops, may fail with more complex scenarios
            if not candidate.successors.filter(
                status=candidate.STATUS_DONE, activity_name=self.name
            ).exists():
                return candidate
        raise self.flow.activity_model.DoesNotExist()

    def instantiate(self, predecessor=None, instance_kwargs=None, **kwargs):
        """Attach the arriving predecessor branch to the (shared) wait
        instance, creating it if no joinable one exists yet."""
        if predecessor is None:
            raise ValueError("Can't wait for something without a predecessor.")
        # find the instance
        try:
            self.instance = self._find_existing_instance(predecessor)
        except self.flow.activity_model.DoesNotExist:
            self.instance = self.flow.activity_model(
                process=self.process, activity_name=self.name, **(instance_kwargs or {})
            )
            self.instance.save()
        self.instance.predecessors.add(predecessor.instance)
        self.start()

    def start(self, **kwargs):
        # no status assert here: start() re-runs once per arriving branch
        if not self.instance.started_at:
            self.instance.started_at = timezone.now()
        self.instance.status = self.instance.STATUS_STARTED
        self.instance.save()
        predecessor_names = {
            instance.activity_name for instance in self.instance.predecessors.all()
        }
        # finish only once every awaited activity has arrived
        if self._wait_for.issubset(predecessor_names):
            self.finish()
| processlib/activity.py | 14,466 | An async activity that renders a view while the async task is running.
The view could be AsyncActivityView with a custom template_name
An activity that simple serves as a marker for a certain state being reached, e.g.
if the activity before it was conditional.
ensure that we have a single referenced process object HACK: we skip the EndActivity implementation because it would finish the activity right away FIXME this only corrects for simple loops, may fail with more complex scenarios find the instance | 508 | en | 0.915931 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import os
import sys
def update_allure_feature_name(results_dir: str, prefix: str) -> None:
    """Make Allure JSON results unique by pre-pending a prefix to: name, historyId & uuid.

    Use it when not all of the test results show up in the Allure report.
    This is because tests from different workers can actually have the same: historyId & uuid values.
    You can use e.g. browser name as the prefix.
    """
    results_dir_path = os.path.join(".", results_dir)
    update_count = 0
    for filename in os.listdir(results_dir_path):
        # only Allure result files; skip attachments and other artifacts
        if not filename.endswith(".json"):
            continue
        result_file = os.path.join(results_dir_path, filename)
        # Allure writes UTF-8 JSON; be explicit so the platform default
        # encoding cannot corrupt non-ASCII test names (we also write
        # with ensure_ascii=False below, which requires utf-8 on write).
        with open(result_file, "r", encoding="utf-8") as json_file:
            report = json.load(json_file)
        report["name"] = f"{prefix} - {report['name']}"
        report["historyId"] = f"{prefix}{report['historyId']}"
        report["uuid"] = f"{prefix}{report['uuid']}"
        with open(result_file, "w", encoding="utf-8") as json_file:
            json.dump(report, json_file, indent=2, ensure_ascii=False)
        update_count += 1
    print(f"Updated {update_count} JSON reports")
if __name__ == "__main__":
    # CLI usage: update_results.py <results_dir> <prefix>
    update_allure_feature_name(results_dir=sys.argv[1], prefix=sys.argv[2])
| update_results.py | 1,324 | Make Allure JSON results unique by pre-pending a prefix to: name, historyId & uuid.
Use it when not all of the test results show up in the Allure report.
This is because tests from different workers can actually have the same: historyId & uuid values.
You can use e.g. browser name as the prefix.
!/usr/bin/env python3 -*- coding: utf-8 -*- | 343 | en | 0.839487 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Widen the 'item_number' choices to positions 1-29 on both
    RadioSheet and TelevisionSheet; the field records a story's position
    within a newscast."""

    dependencies = [
        ('forms', '0016_auto_20150330_1413'),
    ]

    operations = [
        migrations.AlterField(
            model_name='radiosheet',
            name='item_number',
            field=models.PositiveIntegerField(help_text='Write in the number that describes the position of the story within the newscast. E.g. the first story in the newscast is item 1; the seventh story is item 7.', verbose_name='(1) Item Number', choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12), (13, 13), (14, 14), (15, 15), (16, 16), (17, 17), (18, 18), (19, 19), (20, 20), (21, 21), (22, 22), (23, 23), (24, 24), (25, 25), (26, 26), (27, 27), (28, 28), (29, 29)]),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='televisionsheet',
            name='item_number',
            field=models.PositiveIntegerField(help_text='Write in the number that describes the position of the story within the newscast. E.g. the first story in the newscast is item 1; the seventh story is item 7.', verbose_name='(1) Item Number', choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12), (13, 13), (14, 14), (15, 15), (16, 16), (17, 17), (18, 18), (19, 19), (20, 20), (21, 21), (22, 22), (23, 23), (24, 24), (25, 25), (26, 26), (27, 27), (28, 28), (29, 29)]),
            preserve_default=True,
        ),
    ]
| forms/migrations/0017_auto_20150331_1815.py | 1,610 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['PrivateEndpointConnectionArgs', 'PrivateEndpointConnection']
@pulumi.input_type
class PrivateEndpointConnectionArgs:
    # NOTE: generated by the Pulumi SDK Generator; @pulumi.input_type inspects the
    # class layout at runtime, so only documentation should be hand-edited here.
    def __init__(__self__, *,
                 factory_name: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input['PrivateLinkConnectionApprovalRequestArgs']] = None):
        """
        The set of arguments for constructing a PrivateEndpointConnection resource.
        :param pulumi.Input[str] factory_name: The factory name.
        :param pulumi.Input[str] resource_group_name: The resource group name.
        :param pulumi.Input[str] private_endpoint_connection_name: The private endpoint connection name.
        :param pulumi.Input['PrivateLinkConnectionApprovalRequestArgs'] properties: Core resource properties
        """
        pulumi.set(__self__, "factory_name", factory_name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # Optional arguments are only stored when provided, so Pulumi can
        # distinguish "absent" from an explicit value.
        if private_endpoint_connection_name is not None:
            pulumi.set(__self__, "private_endpoint_connection_name", private_endpoint_connection_name)
        if properties is not None:
            pulumi.set(__self__, "properties", properties)

    @property
    @pulumi.getter(name="factoryName")
    def factory_name(self) -> pulumi.Input[str]:
        """
        The factory name.
        """
        return pulumi.get(self, "factory_name")

    @factory_name.setter
    def factory_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "factory_name", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The resource group name.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="privateEndpointConnectionName")
    def private_endpoint_connection_name(self) -> Optional[pulumi.Input[str]]:
        """
        The private endpoint connection name.
        """
        return pulumi.get(self, "private_endpoint_connection_name")

    @private_endpoint_connection_name.setter
    def private_endpoint_connection_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_endpoint_connection_name", value)

    @property
    @pulumi.getter
    def properties(self) -> Optional[pulumi.Input['PrivateLinkConnectionApprovalRequestArgs']]:
        """
        Core resource properties
        """
        return pulumi.get(self, "properties")

    @properties.setter
    def properties(self, value: Optional[pulumi.Input['PrivateLinkConnectionApprovalRequestArgs']]):
        pulumi.set(self, "properties", value)
class PrivateEndpointConnection(pulumi.CustomResource):
    # NOTE: generated by the Pulumi SDK Generator; the overload/dispatch shape is
    # relied upon by the SDK, so only documentation should be hand-edited here.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 factory_name: Optional[pulumi.Input[str]] = None,
                 private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input[pulumi.InputType['PrivateLinkConnectionApprovalRequestArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Private Endpoint Connection ARM resource.
        API Version: 2018-06-01.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] factory_name: The factory name.
        :param pulumi.Input[str] private_endpoint_connection_name: The private endpoint connection name.
        :param pulumi.Input[pulumi.InputType['PrivateLinkConnectionApprovalRequestArgs']] properties: Core resource properties
        :param pulumi.Input[str] resource_group_name: The resource group name.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: PrivateEndpointConnectionArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Private Endpoint Connection ARM resource.
        API Version: 2018-06-01.
        :param str resource_name: The name of the resource.
        :param PrivateEndpointConnectionArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a single Args object
        # or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(PrivateEndpointConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 factory_name: Optional[pulumi.Input[str]] = None,
                 private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
                 properties: Optional[pulumi.Input[pulumi.InputType['PrivateLinkConnectionApprovalRequestArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Actual constructor body shared by both public overloads.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate required inputs and register
            # output-only fields as None placeholders.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
            if factory_name is None and not opts.urn:
                raise TypeError("Missing required property 'factory_name'")
            __props__.__dict__["factory_name"] = factory_name
            __props__.__dict__["private_endpoint_connection_name"] = private_endpoint_connection_name
            __props__.__dict__["properties"] = properties
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["etag"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["type"] = None
        # Aliases keep state continuity with the legacy azure-nextgen and
        # versioned type tokens.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:datafactory:PrivateEndpointConnection"), pulumi.Alias(type_="azure-native:datafactory/v20180601:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:datafactory/v20180601:PrivateEndpointConnection")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(PrivateEndpointConnection, __self__).__init__(
            'azure-native:datafactory:PrivateEndpointConnection',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpointConnection':
        """
        Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Only output properties are declared; their values are read back from
        # the provider during the lookup.
        __props__ = PrivateEndpointConnectionArgs.__new__(PrivateEndpointConnectionArgs)
        __props__.__dict__["etag"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["properties"] = None
        __props__.__dict__["type"] = None
        return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        Etag identifies change in the resource.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def properties(self) -> pulumi.Output['outputs.RemotePrivateEndpointConnectionResponse']:
        """
        Core resource properties
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The resource type.
        """
        return pulumi.get(self, "type")
| sdk/python/pulumi_azure_native/datafactory/private_endpoint_connection.py | 9,605 | The set of arguments for constructing a PrivateEndpointConnection resource.
:param pulumi.Input[str] factory_name: The factory name.
:param pulumi.Input[str] resource_group_name: The resource group name.
:param pulumi.Input[str] private_endpoint_connection_name: The private endpoint connection name.
:param pulumi.Input['PrivateLinkConnectionApprovalRequestArgs'] properties: Core resource properties
Private Endpoint Connection ARM resource.
API Version: 2018-06-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] factory_name: The factory name.
:param pulumi.Input[str] private_endpoint_connection_name: The private endpoint connection name.
:param pulumi.Input[pulumi.InputType['PrivateLinkConnectionApprovalRequestArgs']] properties: Core resource properties
:param pulumi.Input[str] resource_group_name: The resource group name.
Private Endpoint Connection ARM resource.
API Version: 2018-06-01.
:param str resource_name: The name of the resource.
:param PrivateEndpointConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
Etag identifies change in the resource.
The factory name.
Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
The resource name.
The private endpoint connection name.
Core resource properties
Core resource properties
The resource group name.
The resource type.
coding=utf-8 *** WARNING: this file was generated by the Pulumi SDK Generator. *** *** Do not edit by hand unless you're certain you know what you are doing! *** | 1,939 | en | 0.614426 |
#!/usr/bin/env python
# coding: utf-8
# # WORKFLOW PROCEDURE
# In[ ]:
# import utilities
from ds_utils import *
# to plot results
get_ipython().run_line_magic('matplotlib', 'inline')
# ## How to use this code:
#
# ### Step 1
#
# From a list of train and test datasets run the baseline_generator function and check the results
# in the output file. This file is sorted by AUC value in each dataset and algorithm. You should probably need to
# run the ROC_baseline_plot as well to get a visualization of the previous baseline results. This will give us
# an idea of the general performances. So the next step should be optimized the best model(s) using the best dataset
# according to the previous results. If you want to optimized more than one model they can be stored into a list to use a grid search
# in all models by using the nestedCV function
#
#
# ### Step 2
#
# Pick the dataset and the algorithm to optimized and pass them to the nestedCV function. This function will find the best combination of
# parameters and train a model based on it. As an output the fitted model will be returned, so there is no need to fit the model again. This
# output could be used in the next step testing these models on a unseen test set which was not used in the nestedCV phase.
#
#
# ### Step 3
#
# From a list of optimized model by the nesteCV funtion, predict classes using an unseen test set using the check_predictions_unseen_test_set.
# This function will return a file which is sorted by AUC value as well as a roc curve plot. This file will tell us the model which achieves better performance in the
# test set.
#
#
# ### Step 4
#
# Further analysis plotting some graphs such as ROC curve, PR, etc..
# In[ ]:
# set list of train and test files; each train file is paired positionally
# with the test file at the same index
listFiles_tr = ['minitrain.csv', 's.ds_MA_tr.csv']
listFiles_ts = ['minitest.csv', 's.ds_MA_ts.csv']
# run a baseline over all dataset pairs; results are written to the output
# file sorted by AUC (see Step 1 in the workflow notes above)
baseline_generator(listFiles_tr, listFiles_ts)
# In[ ]:
# plot the ROC curves for the dataset which achieves the best performance
# as we can see 'minitrain.csv' is the dataset which seems to get better performances
# so let's plot roc curves on it.
newFile_tr = 'minitrain.csv' # new training data
newFile_ts = 'minitest.csv' # new testing data
ROC_baseline_plot(newFile_tr, newFile_ts)
# According to this baseline results it seems that GradientBoostingClassifier is a good candidate as is one of the model with higher AUC, so we can try to optimize its parameters on the minitrain dataset since is the one the suits better GradientBoostingClassifier. For simplicity we will look for parameters on an algorithm which is faster to train, let's say Logistics Regression and another one more complex such as Random Forest.
#
# So we should proceed as follows:
# Once we have decided to use a dataset we can extract its values only once. By doing this we can use some
# useful functions like the ones described below
# In[ ]:
# Since now we were using just one dataset. So we keep newFile_tr and newFile_ts from above
# Get data from that datasets; datasets_parser returns (X_train, y_train,
# X_test, y_test) — outVar names the target column (defined in ds_utils)
values = datasets_parser(newFile_tr, newFile_ts, outVar=outVar)
X_tr_data = values[0] # X_train data
y_tr_data = values[1] # y_train data
X_ts_data = values[2] # X_test data
y_ts_data = values[3] # y_test data
def gridsearchCV_strategy(X_tr_data, y_tr_data, list_estimators, list_params):
    """Optimize each estimator with nestedCV using its own parameter grid.

    ``list_estimators`` and ``list_params`` must have the same length: each
    estimator is paired positionally with one parameter grid, e.g.::

        list_estimators = [RandomForestClassifier(),
                           LogisticRegression()]
        list_params = [{'n_estimators': [500, 1000],
                        'max_features': [8, 10],
                        'max_depth': [4, 6, 8],
                        'criterion': ['gini', 'entropy']},
                       {'C': [100, 1000], 'solver': ['lbfgs'],
                        'max_iter': [1000, 2000], 'n_jobs': [-1]}]

    :param X_tr_data: training features used for the parameter search.
    :param y_tr_data: training labels used for the parameter search.
    :param list_estimators: scikit-learn estimator instances to optimize.
    :param list_params: one parameter grid (dict) per estimator.
    :return: list of fitted models, one per estimator, as returned by nestedCV.
    :raises ValueError: if the two lists differ in length.
    """
    # First check if both lists has the same length
    if len(list_estimators) != len(list_params):
        raise ValueError("list_estimators and list_params must have the same length")
    # Estimate class weights once from the training labels; applied to every
    # estimator that supports them.
    class_weights = set_weights(y_tr_data)
    # Normalize common settings on each estimator when it exposes them:
    # class_weight (imbalance handling), n_jobs (use all cores) and
    # random_state (reproducibility via the module-level seed).
    for est in list_estimators:
        est_params = est.get_params()
        if 'class_weight' in est_params:
            est.set_params(class_weight=class_weights)
        if 'n_jobs' in est_params:
            est.set_params(n_jobs=-1)
        if 'random_state' in est_params:
            est.set_params(random_state=seed)
    # Pair estimators with their grids directly; the estimator-keyed dict used
    # previously was redundant and would silently drop entries if two
    # estimators ever compared equal.
    list_optimized_models = [nestedCV(estimator, X_tr_data, y_tr_data, param_grid=parameters)
                             for estimator, parameters in zip(list_estimators, list_params)]
    return list_optimized_models
# In[ ]:
# Example of execution: one small grid per estimator (kept tiny so the
# nested cross-validation finishes quickly in this demo)
list_estimators = [RandomForestClassifier(),LogisticRegression()]
list_params = [{'n_estimators': [500],
                'max_features': [8],
                'max_depth' : [8],
                'criterion' :['entropy']}, {'C': [1000], 'solver' : ['lbfgs'],
                                            'max_iter' : [200]
                                            }]
list_optimized_models = gridsearchCV_strategy(X_tr_data, y_tr_data, list_estimators, list_params)
# Convergence warnings are expected: the data is not standardized. Logistic
# regression would converge faster after a StandardScaler transformation.
# In[ ]:
# Make predictions on the unseen test set; writes an AUC-sorted results file
# and a ROC curve plot (see Step 3 in the workflow notes above)
check_predictions_unseen_test_set(list_optimized_models, X_ts_data, y_ts_data, newFile_ts)
# In[ ]:
| workflow_procedure_example.py | 6,285 | len of list_estimators and list_params should be the same. For any
estimator you need a list of parameters to optimize. Eg
list_estimators = [RandomForestClassifier(),
LogisticRegression()]
list_params = [{'n_estimators': [500,1000],
'max_features': [8,10],
'max_depth' : [4,6,8],
'criterion' :['gini', 'entropy']},'C': [100, 1000], 'solver' : ['lbfgs'],
'max_iter' : [1000, 2000], 'n_jobs' : [-1]
}]
!/usr/bin/env python coding: utf-8 WORKFLOW PROCEDURE In[ ]: import utilities to plot results How to use this code: Step 1 From a list of train and test datasets run the baseline_generator function and check the results in the output file. This file is sorted by AUC value in each dataset and algorithm. You should probably need to run the ROC_baseline_plot as well to get a visualization of the previous baseline results. This will give us an idea of the general performances. So the next step should be optimized the best model(s) using the best dataset according to the previous results. If you want to optimized more than one model they can be stored into a list to use a grid search in all models by using the nestedCV function Step 2 Pick the dataset and the algorithm to optimized and pass them to the nestedCV function. This function will find the best combination of parameters and train a model based on it. As an output the fitted model will be returned, so there is no need to fit the model again. This output could be used in the next step testing these models on a unseen test set which was not used in the nestedCV phase. Step 3 From a list of optimized model by the nesteCV funtion, predict classes using an unseen test set using the check_predictions_unseen_test_set. This function will return a file which is sorted by AUC value as well as a roc curve plot. This file will tell us the model which achieves better performance in the test set. Step 4 Further analysis plotting some graphs such as ROC curve, PR, etc.. In[ ]: set list of train and test files run a baseline with datasets from above In[ ]: plot the ROC curves for the dataset which achieves the best performance as we can see 'minitrain.csv' is the dataset which seems to get better performances so let's plot roc curves on it. 
new training data new testing data According to this baseline results it seems that GradientBoostingClassifier is a good candidate as is one of the model with higher AUC, so we can try to optimize its parameters on the minitrain dataset since is the one the suits better GradientBoostingClassifier. For simplicity we will look for parameters on an algorithm which is faster to train, let's say Logistics Regression and another one more complex such as Random Forest. So we should proceed as follows: Once we have decided to use a dataset we can extract its values only once. By doing this we can use some useful functions like the ones described below In[ ]: Since now we were using just one dataset. So we keep newFile_tr and newFile_ts from above Get data from that datasets X_train data y_train data X_test data y_test data In[ ]: First check if both lists has the same length Estimate weights in the data used to look for parameters iterate through the list of estimators to see if any of them has some parameters such as random_state or class_weight or n_jobs, if so we will set them to the chosen seed for the running task and the weights estimated into this function which will be the ones obtained from the training data used.check which params were used in the list_optimized_modelsfor op_model in list_optimized_models: print(op_model.get_params()) In[ ]: Example of execution Converge warning are due to the scale of the dataset. It would be converge faster using standar_scaler transformation from scikit-learn In[ ]: Make predictions on unseen dataset In[ ]: | 3,931 | en | 0.874581 |
# pylint:disable=unused-variable
# pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
# pylint:disable=protected-access
import json
import re
import urllib.parse
from collections import namedtuple
from pathlib import Path
from random import randint
from typing import Callable, List
from uuid import uuid4
import pytest
import respx
from fastapi import FastAPI, status
from models_library.services import ServiceDockerData, ServiceKeyVersion
from simcore_service_director_v2.models.schemas.services import (
RunningServiceDetails,
ServiceExtras,
)
from simcore_service_director_v2.modules.director_v0 import DirectorV0Client
@pytest.fixture(autouse=True)
def minimal_director_config(project_env_devel_environment, monkeypatch):
    """set a minimal configuration for testing the director connection only"""
    # keep only the director subsystem on; everything else is disabled
    env_flags = {
        "DIRECTOR_ENABLED": "1",
        "POSTGRES_ENABLED": "0",
        "CELERY_ENABLED": "0",
        "REGISTRY_ENABLED": "0",
    }
    for variable, value in env_flags.items():
        monkeypatch.setenv(variable, value)
@pytest.fixture
def mocked_director_v0_service_api(minimal_app, entrypoint, exp_data, resp_alias):
    """Mock the director-v0 REST API at the app's configured base URL.

    Registers a single GET route for *entrypoint* that returns *exp_data*;
    the route can be retrieved by *resp_alias* for call assertions.
    (entrypoint/exp_data/resp_alias come from test parametrization.)
    """
    with respx.mock(
        base_url=minimal_app.state.settings.director_v0.base_url(include_tag=False),
        assert_all_called=False,
        assert_all_mocked=True,
    ) as respx_mock:
        # lists services; entrypoint is unquoted because respx matches the
        # decoded path
        respx_mock.get(
            urllib.parse.unquote(entrypoint),
            content=exp_data,
            alias=resp_alias,
        )
        yield respx_mock
# One expected proxy call: the entrypoint to hit, the expected HTTP status
# and payload, and the respx alias used to assert the route was called.
ForwardToDirectorParams = namedtuple(
    "ForwardToDirectorParams",
    ["entrypoint", "exp_status", "exp_data", "resp_alias"],
)
def _get_list_services_calls() -> List[ForwardToDirectorParams]:
    """Expected proxy calls for the three service-listing flavours."""
    listing_routes = [
        ("/v0/services", "list_all_services"),
        ("/v0/services?service_type=computational", "list_computational_services"),
        ("/v0/services?service_type=dynamic", "list_dynamic_services"),
    ]
    # every listing returns the same canned payload with a 200
    return [
        ForwardToDirectorParams(
            entrypoint=url,
            exp_status=status.HTTP_200_OK,
            exp_data={"data": ["service1", "service2"]},
            resp_alias=alias,
        )
        for url, alias in listing_routes
    ]
def _get_service_version_calls() -> List[ForwardToDirectorParams]:
    """Expected proxy call for a specific service key/version lookup."""
    # TODO: here we see the return value is currently not validated
    version_call = ForwardToDirectorParams(
        "/v0/services/simcore%2Fservices%2Fdynamic%2Fmyservice/1.3.4",
        status.HTTP_200_OK,
        {"data": ["stuff about my service"]},
        "get_service_version",
    )
    return [version_call]
def _get_service_version_extras_calls() -> List[ForwardToDirectorParams]:
    """Expected proxy call for a service's extras endpoint."""
    # TODO: here we see the return value is currently not validated
    extras_call = ForwardToDirectorParams(
        "/v0/services/simcore%2Fservices%2Fdynamic%2Fmyservice/1.3.4/extras",
        status.HTTP_200_OK,
        {"data": "extra stuff about my service"},
        "get_service_extras",
    )
    return [extras_call]
@pytest.mark.parametrize(
    "entrypoint,exp_status,exp_data,resp_alias",
    _get_list_services_calls()
    + _get_service_version_calls()
    + _get_service_version_extras_calls(),
)
def test_forward_to_director(
    client, mocked_director_v0_service_api, entrypoint, exp_status, exp_data, resp_alias
):
    """director-v2 transparently proxies GET requests to director-v0."""
    response = client.get(entrypoint)
    assert response.status_code == exp_status
    assert response.json() == exp_data
    # the mocked director-v0 route must have been hit
    assert mocked_director_v0_service_api[resp_alias].called
@pytest.fixture(scope="session")
def fake_service_details(mocks_dir: Path) -> ServiceDockerData:
    """Canned service description loaded from mocks/fake_service.json."""
    mock_path = mocks_dir / "fake_service.json"
    assert mock_path.exists()
    return ServiceDockerData(**json.loads(mock_path.read_text()))
@pytest.fixture
def fake_service_extras(random_json_from_schema: Callable) -> ServiceExtras:
    """ServiceExtras built from randomly generated, schema-valid data."""
    schema = ServiceExtras.schema_json(indent=2)
    return ServiceExtras(**random_json_from_schema(schema))
@pytest.fixture
def fake_running_service_details(
    random_json_from_schema: Callable,
) -> RunningServiceDetails:
    """Random RunningServiceDetails with port fields forced into a valid range."""
    raw = random_json_from_schema(RunningServiceDetails.schema_json(indent=2))
    # fix port stuff, the randomiser does not understand positive ints
    for port_field in ("published_port", "service_port"):
        if port_field in raw:
            raw[port_field] = randint(1, 50000)
    return RunningServiceDetails(**raw)
@pytest.fixture
def mocked_director_service_fcts(
    minimal_app: FastAPI,
    fake_service_details: ServiceDockerData,
    fake_service_extras: ServiceExtras,
    fake_running_service_details: RunningServiceDetails,
):
    """Mock the three director-v0 endpoints the DirectorV0Client uses:
    service details, service extras and running-service details.
    """
    with respx.mock(
        base_url=minimal_app.state.settings.director_v0.base_url(include_tag=False),
        assert_all_called=False,
        assert_all_mocked=True,
    ) as respx_mock:
        # service details come back wrapped in a one-element list envelope
        respx_mock.get(
            "/v0/services/simcore%2Fservices%2Fdynamic%2Fmyservice/1.3.4",
            content={"data": [fake_service_details.dict(by_alias=True)]},
            alias="get_service_version",
        )
        respx_mock.get(
            "/v0/service_extras/simcore%2Fservices%2Fdynamic%2Fmyservice/1.3.4",
            content={"data": fake_service_extras.dict(by_alias=True)},
            alias="get_service_extras",
        )
        # match /running_interactive_services/<uuid>, with or without hyphens
        pattern = re.compile(
            r"v0/running_interactive_services/[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}$"
        )
        respx_mock.get(
            pattern,
            content={"data": fake_running_service_details.dict(by_alias=True)},
            alias="get_running_service_details",
        )
        yield respx_mock
async def test_get_service_details(
    minimal_app: FastAPI,
    mocked_director_service_fcts,
    fake_service_details: ServiceDockerData,
):
    """The client resolves a service key/version through the mocked director."""
    director_client: DirectorV0Client = minimal_app.state.director_v0_client
    requested = ServiceKeyVersion(
        key="simcore/services/dynamic/myservice", version="1.3.4"
    )
    details: ServiceDockerData = await director_client.get_service_details(requested)
    assert mocked_director_service_fcts["get_service_version"].called
    assert details == fake_service_details
async def test_get_service_extras(
    minimal_app: FastAPI,
    mocked_director_service_fcts,
    fake_service_extras: ServiceExtras,
):
    """The client fetches service extras through the mocked director."""
    director_client: DirectorV0Client = minimal_app.state.director_v0_client
    requested = ServiceKeyVersion(
        key="simcore/services/dynamic/myservice", version="1.3.4"
    )
    extras: ServiceExtras = await director_client.get_service_extras(requested)
    assert mocked_director_service_fcts["get_service_extras"].called
    assert extras == fake_service_extras
async def test_get_running_service_details(
    minimal_app: FastAPI,
    mocked_director_service_fcts,
    fake_running_service_details: RunningServiceDetails,
):
    """The client fetches running-service details for an arbitrary node id."""
    director_client: DirectorV0Client = minimal_app.state.director_v0_client
    node_id = str(uuid4())
    details: RunningServiceDetails = await director_client.get_running_service_details(
        node_id
    )
    assert mocked_director_service_fcts["get_running_service_details"].called
    assert details == fake_running_service_details
| services/director-v2/tests/unit/test_modules_director_v0.py | 7,725 | set a minimal configuration for testing the director connection only
pylint:disable=unused-variable pylint:disable=unused-argument pylint:disable=redefined-outer-name pylint:disable=protected-access lists services TODO: here we see the return value is currently not validated TODO: here we see the return value is currently not validated fix port stuff, the randomiser does not understand positive ints | 404 | en | 0.693469 |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.blocktools import get_masternode_payment, create_coinbase, create_block
from test_framework.mininode import *
from test_framework.test_framework import cryptocauseTestFramework
from test_framework.util import *
from time import *
'''
llmq-is-cl-conflicts.py
Checks conflict handling between ChainLocks and InstantSend
'''
class TestNode(SingleNodeConnCB):
    """P2P stub that serves pre-loaded CLSIG/ISLOCK messages on request.

    Messages are announced via inv and handed out on getdata, mimicking a
    peer that already holds the signed chainlock/instantsend messages.
    """

    def __init__(self):
        SingleNodeConnCB.__init__(self)
        self.clsigs = {}   # message hash -> chainlock signature message
        self.islocks = {}  # message hash -> instantsend lock message

    def send_clsig(self, clsig):
        # announce the chainlock with inv type 29; payload served on getdata
        # (local renamed from `hash` to avoid shadowing the builtin)
        msg_hash = uint256_from_str(hash256(clsig.serialize()))
        self.clsigs[msg_hash] = clsig
        inv = msg_inv([CInv(29, msg_hash)])
        self.send_message(inv)

    def send_islock(self, islock):
        # announce the instantsend lock with inv type 30
        msg_hash = uint256_from_str(hash256(islock.serialize()))
        self.islocks[msg_hash] = islock
        inv = msg_inv([CInv(30, msg_hash)])
        self.send_message(inv)

    def on_getdata(self, conn, message):
        # serve whichever of our announced messages the peer asked for
        for inv in message.inv:
            if inv.hash in self.clsigs:
                self.send_message(self.clsigs[inv.hash])
            if inv.hash in self.islocks:
                self.send_message(self.islocks[inv.hash])
class LLMQ_IS_CL_Conflicts(cryptocauseTestFramework):
    def __init__(self):
        # 6 nodes, 5 of them masternodes; fast DIP3 enforcement so quorums
        # can be formed quickly in regtest
        super().__init__(6, 5, [], fast_dip3_enforcement=True)
        #disable_mocktime()
    def run_test(self):
        """Drive the ChainLock/InstantSend conflict scenarios end to end."""
        # dip0008 gates the ChainLocks message format; mine until it activates
        while self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active":
            self.nodes[0].generate(10)
        sync_blocks(self.nodes, timeout=60*5)
        # attach our P2P stub so we can inject CLSIG/ISLOCK messages directly
        self.test_node = TestNode()
        self.test_node.add_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
        NetworkThread().start() # Start up network handling in another thread
        self.test_node.wait_for_verack()
        # enable DKG, ChainLocks and LLMQ-based InstantSend via sporks
        self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
        self.nodes[0].spork("SPORK_19_CHAINLOCKS_ENABLED", 0)
        self.nodes[0].spork("SPORK_20_INSTANTSEND_LLMQ_BASED", 0)
        self.wait_for_sporks_same()
        self.mine_quorum()
        # mine single block, wait for chainlock
        self.nodes[0].generate(1)
        self.wait_for_chainlock_tip_all_nodes()
        self.test_chainlock_overrides_islock(False)
        self.test_chainlock_overrides_islock(True)
        self.test_islock_overrides_nonchainlock()
    def test_chainlock_overrides_islock(self, test_block_conflict):
        """A ChainLock must win over conflicting InstantSend locks.

        Locks tx1 (and a child tx4) via InstantSend, then chainlocks a block
        containing the conflicting tx2; the ISLOCKs must be pruned.
        When *test_block_conflict* is True the block is submitted before the
        CLSIG arrives, so the node first rejects it as conflict-tx-lock.
        """
        # create three raw TXs, they will conflict with each other
        # NOTE(review): rawtx3 is created but never used below — candidate for removal
        rawtx1 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
        rawtx2 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
        rawtx3 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
        rawtx1_obj = FromHex(CTransaction(), rawtx1)
        rawtx2_obj = FromHex(CTransaction(), rawtx2)
        rawtx3_obj = FromHex(CTransaction(), rawtx3)
        # tx1 is broadcast; tx2/tx3 txids are computed locally (double-SHA256,
        # reversed, hex) since they are not sent to the node here
        rawtx1_txid = self.nodes[0].sendrawtransaction(rawtx1)
        rawtx2_txid = encode(hash256(hex_str_to_bytes(rawtx2))[::-1], 'hex_codec').decode('ascii')
        rawtx3_txid = encode(hash256(hex_str_to_bytes(rawtx3))[::-1], 'hex_codec').decode('ascii')
        # Create a chained TX on top of tx1, spending its 1-coin output
        inputs = []
        n = 0
        for out in rawtx1_obj.vout:
            if out.nValue == 100000000:
                inputs.append({"txid": rawtx1_txid, "vout": n})
            n += 1
        rawtx4 = self.nodes[0].createrawtransaction(inputs, {self.nodes[0].getnewaddress(): 0.999})
        rawtx4 = self.nodes[0].signrawtransaction(rawtx4)['hex']
        rawtx4_txid = self.nodes[0].sendrawtransaction(rawtx4)
        # both tx1 and its child must get InstantSend-locked everywhere
        for node in self.nodes:
            self.wait_for_instantlock(rawtx1_txid, node)
            self.wait_for_instantlock(rawtx4_txid, node)
        # mine (locally) a block containing the conflicting tx2
        block = self.create_block(self.nodes[0], [rawtx2_obj])
        if test_block_conflict:
            # without a chainlock, the block conflicts with the ISLOCKs
            submit_result = self.nodes[0].submitblock(ToHex(block))
            assert(submit_result == "conflict-tx-lock")
        # sign a chainlock for the not-yet-accepted block and inject it
        cl = self.create_chainlock(self.nodes[0].getblockcount() + 1, block.sha256)
        self.test_node.send_clsig(cl)
        # Give the CLSIG some time to propagate. We unfortunately can't check propagation here as "getblock/getblockheader"
        # is required to check for CLSIGs, but this requires the block header to be propagated already
        sleep(1)
        # The block should get accepted now, and at the same time prune the conflicting ISLOCKs
        submit_result = self.nodes[1].submitblock(ToHex(block))
        if test_block_conflict:
            assert(submit_result == "duplicate")
        else:
            assert(submit_result is None)
        for node in self.nodes:
            self.wait_for_chainlock(node, "%064x" % block.sha256)
        # Create a chained TX on top of tx2 — must now lock fine, proving tx2 won
        inputs = []
        n = 0
        for out in rawtx2_obj.vout:
            if out.nValue == 100000000:
                inputs.append({"txid": rawtx2_txid, "vout": n})
            n += 1
        rawtx5 = self.nodes[0].createrawtransaction(inputs, {self.nodes[0].getnewaddress(): 0.999})
        rawtx5 = self.nodes[0].signrawtransaction(rawtx5)['hex']
        rawtx5_txid = self.nodes[0].sendrawtransaction(rawtx5)
        for node in self.nodes:
            self.wait_for_instantlock(rawtx5_txid, node)
        # Lets verify that the ISLOCKs got pruned: tx1/tx4 are gone, tx2 is
        # chainlocked and only externally (not internally) instant-locked
        for node in self.nodes:
            assert_raises_jsonrpc(-5, "No such mempool or blockchain transaction", node.getrawtransaction, rawtx1_txid, True)
            assert_raises_jsonrpc(-5, "No such mempool or blockchain transaction", node.getrawtransaction, rawtx4_txid, True)
            rawtx = node.getrawtransaction(rawtx2_txid, True)
            assert(rawtx['chainlock'])
            assert(rawtx['instantlock'])
            assert(not rawtx['instantlock_internal'])
def test_islock_overrides_nonchainlock(self):
    """An ISLOCK for a conflicting TX must reorg away non-chainlocked blocks.

    With ChainLocks disabled (too few MNs online), mining a TX that
    conflicts with a not-yet-broadcast ISLOCK and then sending that
    ISLOCK should invalidate the mined blocks, after which the locked
    TX can be mined normally.
    """
    # create two raw TXs, they will conflict with each other
    rawtx1 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
    rawtx2 = self.create_raw_tx(self.nodes[0], self.nodes[0], 1, 1, 100)['hex']
    rawtx1_txid = encode(hash256(hex_str_to_bytes(rawtx1))[::-1], 'hex_codec').decode('ascii')
    rawtx2_txid = encode(hash256(hex_str_to_bytes(rawtx2))[::-1], 'hex_codec').decode('ascii')
    # Create an ISLOCK but don't broadcast it yet
    islock = self.create_islock(rawtx2)
    # Stop enough MNs so that ChainLocks don't work anymore
    for i in range(3):
        self.stop_node(len(self.nodes) - 1)
        self.nodes.pop(len(self.nodes) - 1)
        self.mninfo.pop(len(self.mninfo) - 1)
    # Send tx1, which will later conflict with the ISLOCK
    self.nodes[0].sendrawtransaction(rawtx1)
    # fast forward 11 minutes, so that the TX is considered safe and included in the next block
    set_mocktime(get_mocktime() + int(60 * 11))
    set_node_times(self.nodes, get_mocktime())
    # Mine the conflicting TX into a block
    good_tip = self.nodes[0].getbestblockhash()
    self.nodes[0].generate(2)
    self.sync_all()
    # Assert that the conflicting tx got mined and the locked TX is not valid
    assert(self.nodes[0].getrawtransaction(rawtx1_txid, True)['confirmations'] > 0)
    assert_raises_jsonrpc(-25, "Missing inputs", self.nodes[0].sendrawtransaction, rawtx2)
    # Send the ISLOCK, which should result in the last 2 blocks to be invalidated, even though the nodes don't know
    # the locked transaction yet
    self.test_node.send_islock(islock)
    sleep(5)  # give the nodes time to process the ISLOCK and reorg
    assert(self.nodes[0].getbestblockhash() == good_tip)
    assert(self.nodes[1].getbestblockhash() == good_tip)
    # Send the actual transaction and mine it
    self.nodes[0].sendrawtransaction(rawtx2)
    self.nodes[0].generate(1)
    self.sync_all()
    assert(self.nodes[0].getrawtransaction(rawtx2_txid, True)['confirmations'] > 0)
    assert(self.nodes[1].getrawtransaction(rawtx2_txid, True)['confirmations'] > 0)
    assert(self.nodes[0].getrawtransaction(rawtx2_txid, True)['instantlock'])
    assert(self.nodes[1].getrawtransaction(rawtx2_txid, True)['instantlock'])
    assert(self.nodes[0].getbestblockhash() != good_tip)
    assert(self.nodes[1].getbestblockhash() != good_tip)
def wait_for_chainlock_tip_all_nodes(self):
    """Block until every node reports a ChainLock on its own chain tip."""
    for node in self.nodes:
        # Delegate to the single-node helper so the tip-lookup logic
        # lives in exactly one place.
        self.wait_for_chainlock_tip(node)
def wait_for_chainlock_tip(self, node):
    """Wait until *node*'s current best block becomes chainlocked."""
    self.wait_for_chainlock(node, node.getbestblockhash())
def wait_for_chainlock(self, node, block_hash, timeout=15):
    """Poll *node* until *block_hash* is confirmed and chainlocked.

    Args:
        node: RPC proxy of the node to poll.
        block_hash: Hex hash of the block that must become chainlocked.
        timeout: Seconds to keep polling before giving up (default 15,
            matching the previously hard-coded limit).

    Raises:
        AssertionError: If the block is not chainlocked within *timeout*.
    """
    t = time()
    while time() - t < timeout:
        try:
            block = node.getblockheader(block_hash)
            if block["confirmations"] > 0 and block["chainlock"]:
                return
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt /
            # SystemExit can still abort the poll loop.
            # block might not be on the node yet
            pass
        sleep(0.1)
    raise AssertionError("wait_for_chainlock timed out")
def create_block(self, node, vtx=[]):
    """Build and solve a block on top of *node*'s current block template.

    NOTE(review): the mutable default ``vtx=[]`` is only read, never
    mutated (``block.vtx += vtx`` extends ``block.vtx``), so it is
    harmless here -- but confirm before reusing this pattern.

    Args:
        node: RPC proxy used for the template, addresses and tx lookups.
        vtx: Extra transactions (CTransaction objects) to include.

    Returns:
        A solved block paying miner and masternode per the template.
    """
    bt = node.getblocktemplate()
    height = bt['height']
    tip_hash = bt['previousblockhash']
    coinbasevalue = bt['coinbasevalue']
    miner_address = node.getnewaddress()
    mn_payee = bt['masternode'][0]['payee']
    # calculate fees that the block template included (we'll have to remove it from the coinbase as we won't
    # include the template's transactions
    bt_fees = 0
    for tx in bt['transactions']:
        bt_fees += tx['fee']
    # Fees of the transactions we *do* include (inputs minus outputs).
    new_fees = 0
    for tx in vtx:
        in_value = 0
        out_value = 0
        for txin in tx.vin:
            txout = node.gettxout("%064x" % txin.prevout.hash, txin.prevout.n, False)
            in_value += int(txout['value'] * COIN)
        for txout in tx.vout:
            out_value += txout.nValue
        new_fees += in_value - out_value
    # fix fees
    coinbasevalue -= bt_fees
    coinbasevalue += new_fees
    mn_amount = get_masternode_payment(height, coinbasevalue)
    miner_amount = coinbasevalue - mn_amount
    outputs = {miner_address: str(Decimal(miner_amount) / COIN)}
    if mn_amount > 0:
        outputs[mn_payee] = str(Decimal(mn_amount) / COIN)
    coinbase = FromHex(CTransaction(), node.createrawtransaction([], outputs))
    coinbase.vin = create_coinbase(height).vin
    # We can't really use this one as it would result in invalid merkle roots for masternode lists
    if len(bt['coinbase_payload']) != 0:
        # Mirror the template's coinbase payload so commitments stay valid.
        cbtx = FromHex(CCbTx(version=1), bt['coinbase_payload'])
        coinbase.nVersion = 3
        coinbase.nType = 5  # CbTx
        coinbase.vExtraPayload = cbtx.serialize()
    coinbase.calc_sha256()
    block = create_block(int(tip_hash, 16), coinbase, nTime=bt['curtime'])
    block.vtx += vtx
    # Add quorum commitments from template
    for tx in bt['transactions']:
        tx2 = FromHex(CTransaction(), tx['data'])
        if tx2.nType == 6:
            block.vtx.append(tx2)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()
    return block
def create_chainlock(self, height, blockHash):
    """Have the masternode quorum sign a CLSIG for (height, blockHash).

    Args:
        height: Block height the CLSIG commits to.
        blockHash: Integer hash of the block to chainlock.

    Returns:
        A ``msg_clsig`` carrying the recovered quorum signature.

    Raises:
        AssertionError: If no recovered signature appears within ~10s.
    """
    request_id = "%064x" % uint256_from_str(hash256(ser_string(b"clsig") + struct.pack("<I", height)))
    message_hash = "%064x" % blockHash
    for mn in self.mninfo:
        mn.node.quorum('sign', 100, request_id, message_hash)
    recSig = None
    t = time()
    while time() - t < 10:
        try:
            recSig = self.nodes[0].quorum('getrecsig', 100, request_id, message_hash)
            break
        except Exception:
            # Narrowed from a bare ``except:``; the recovered sig may simply
            # not have propagated yet, but Ctrl-C must still work.
            sleep(0.1)
    assert(recSig is not None)
    clsig = msg_clsig(height, blockHash, hex_str_to_bytes(recSig['sig']))
    return clsig
def create_islock(self, hextx):
    """Have the masternode quorum produce an ISLOCK for the given raw TX.

    Args:
        hextx: Raw transaction as a hex string.

    Returns:
        A ``msg_islock`` covering all of the TX's inputs.

    Raises:
        AssertionError: If no recovered signature appears within ~10s.
    """
    tx = FromHex(CTransaction(), hextx)
    tx.rehash()
    # The signing request id commits to every input outpoint.
    request_id_buf = ser_string(b"islock") + ser_compact_size(len(tx.vin))
    inputs = []
    for txin in tx.vin:
        request_id_buf += txin.prevout.serialize()
        inputs.append(txin.prevout)
    request_id = "%064x" % uint256_from_str(hash256(request_id_buf))
    message_hash = "%064x" % tx.sha256
    for mn in self.mninfo:
        mn.node.quorum('sign', 100, request_id, message_hash)
    recSig = None
    t = time()
    while time() - t < 10:
        try:
            recSig = self.nodes[0].quorum('getrecsig', 100, request_id, message_hash)
            break
        except Exception:
            # Narrowed from a bare ``except:``; the recovered sig may simply
            # not have propagated yet, but Ctrl-C must still work.
            sleep(0.1)
    assert(recSig is not None)
    islock = msg_islock(inputs, tx.sha256, hex_str_to_bytes(recSig['sig']))
    return islock
if __name__ == '__main__':
    # Run the InstantSend/ChainLock conflict functional test directly.
    LLMQ_IS_CL_Conflicts().main()
| qa/rpc-tests/llmq-is-cl-conflicts.py | 13,224 | !/usr/bin/env python3 Copyright (c) 2015-2018 The Dash Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.disable_mocktime() Start up network handling in another thread mine single block, wait for chainlock create three raw TXs, they will conflict with each other Create a chained TX on top of tx1 Give the CLSIG some time to propagate. We unfortunately can't check propagation here as "getblock/getblockheader" is required to check for CLSIGs, but this requires the block header to be propagated already The block should get accepted now, and at the same time prune the conflicting ISLOCKs Create a chained TX on top of tx2 Lets verify that the ISLOCKs got pruned create two raw TXs, they will conflict with each other Create an ISLOCK but don't broadcast it yet Stop enough MNs so that ChainLocks don't work anymore Send tx1, which will later conflict with the ISLOCK fast forward 11 minutes, so that the TX is considered safe and included in the next block Mine the conflicting TX into a block Assert that the conflicting tx got mined and the locked TX is not valid Send the ISLOCK, which should result in the last 2 blocks to be invalidated, even though the nodes don't know the locked transaction yet Send the actual transaction and mine it block might not be on the node yet calculate fees that the block template included (we'll have to remove it from the coinbase as we won't include the template's transactions fix fees We can't really use this one as it would result in invalid merkle roots for masternode lists CbTx Add quorum commitments from template | 1,660 | en | 0.913163 |
#!/usr/bin/env python
#
# @file test_signals.py
#
# @author Matt Gigli <mjgigli@gmail.com>
#
# @section LICENSE
#
# The MIT License (MIT)
# Copyright (c) 2016 Matt Gigli
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
import unittest
from ao.signals import dispatcher
class test_signals(unittest.TestCase):
    """Exercises the ao.signals dispatcher: subscribe, publish, unsubscribe."""

    def setUp(self):
        # Reset every callback marker before each test.
        for index in range(1, 7):
            setattr(self, 'cb%d' % index, 0)
        self.cb_arg1 = None
        self.cb_arg2 = None

    def tearDown(self):
        # Drop every subscription so the tests stay independent.
        dispatcher.unsubscribe_all()

    def callback_1(self):
        self.cb1 = 1

    def callback_2(self):
        self.cb2 = 2

    def callback_1234(self):
        self.cb1, self.cb2, self.cb3, self.cb4 = 1, 2, 3, 4

    def callback_34(self):
        self.cb3, self.cb4 = 3, 4

    def callback_56(self):
        self.cb5, self.cb6 = 5, 6

    def callback_args(self, arg1, arg2=None):
        self.cb_arg1 = arg1
        self.cb_arg2 = arg2

    def test_one_receiver(self):
        # A single subscriber fires when its signal is published.
        self.assertEqual(self.cb1, 0)
        dispatcher.subscribe('cb1_sig', self.callback_1)
        dispatcher.publish('cb1_sig')
        self.assertEqual(self.cb1, 1)

    def test_arguments(self):
        # Positional and keyword arguments reach the receiver intact.
        self.assertEqual(self.cb_arg1, None)
        self.assertEqual(self.cb_arg2, None)
        dispatcher.subscribe('cbarg_sig', self.callback_args)
        dispatcher.publish('cbarg_sig', 'arg1', arg2='arg2')
        self.assertEqual(self.cb_arg1, 'arg1')
        self.assertEqual(self.cb_arg2, 'arg2')

    def test_multiple_receivers(self):
        # One publish may update several markers at once.
        for marker in (self.cb1, self.cb2, self.cb3, self.cb4):
            self.assertEqual(marker, 0)
        dispatcher.subscribe('cb2_sig', self.callback_1234)
        dispatcher.publish('cb2_sig')
        self.assertEqual(self.cb1, 1)
        self.assertEqual(self.cb2, 2)
        self.assertEqual(self.cb3, 3)
        self.assertEqual(self.cb4, 4)

    def test_publish_unsubscribed_signal(self):
        # Publishing a signal nobody subscribed to must not raise.
        dispatcher.publish('lonely_sig')

    def test_unsubscribe_unsubscribed_signal(self):
        # Unsubscribing from a never-subscribed signal must not raise.
        dispatcher.unsubscribe('lonely_sig', self.callback_1)

    def test_unsubscribe(self):
        # Subscribe, publish, and confirm the callback fired.
        dispatcher.subscribe('cb1_sig', self.callback_1)
        dispatcher.publish('cb1_sig')
        self.assertEqual(self.cb1, 1)
        # After unsubscribing, publishing again leaves the marker untouched.
        self.cb1 = 0
        dispatcher.unsubscribe('cb1_sig', self.callback_1)
        dispatcher.publish('cb1_sig')
        self.assertEqual(self.cb1, 0)

    def test_unsubscribe_all_for_signal(self):
        dispatcher.subscribe('cb1_sig', self.callback_1)
        dispatcher.subscribe('cb1_sig', self.callback_2)
        dispatcher.subscribe('cb3_sig', self.callback_34)
        dispatcher.subscribe('cb3_sig', self.callback_56)
        # Clearing one signal must leave the other signal's receivers alone.
        dispatcher.unsubscribe_all('cb1_sig')
        dispatcher.publish('cb1_sig')
        dispatcher.publish('cb3_sig')
        self.assertEqual(self.cb1, 0)
        self.assertEqual(self.cb2, 0)
        self.assertEqual(self.cb3, 3)
        self.assertEqual(self.cb4, 4)
        self.assertEqual(self.cb5, 5)
        self.assertEqual(self.cb6, 6)

    def test_unsubscribe_all(self):
        dispatcher.subscribe('cb1_sig', self.callback_1)
        dispatcher.subscribe('cb1_sig', self.callback_2)
        dispatcher.subscribe('cb3_sig', self.callback_34)
        dispatcher.subscribe('cb3_sig', self.callback_56)
        # With no argument, every receiver of every signal is removed.
        dispatcher.unsubscribe_all()
        dispatcher.publish('cb1_sig')
        dispatcher.publish('cb3_sig')
        for marker in (self.cb1, self.cb2, self.cb3,
                       self.cb4, self.cb5, self.cb6):
            self.assertEqual(marker, 0)
| test/test_signals.py | 5,941 | !/usr/bin/env python @file test_signals.py @author Matt Gigli <mjgigli@gmail.com> @section LICENSE The MIT License (MIT) Copyright (c) 2016 Matt Gigli Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
verify test class members are reset subscribe to the cb1_sig signal publish the signal to call the callback verify callback was called verify test class members are reset subscribe to the cbarg_sig signal publish the signal to call the callback verify callback was called verify test class members are reset subscribe to the signals publish the signal to call the callbacks verify callbacks were called publish a signal that hasn't been subscribed to, to verify that no error occurs when publishing such a signal verify no exception is raised when unsubscribing a receiver from a signal that was never subscribed to subscribe, publish and check that callback was called reset cb1, unsubscribe and show that callback is not called subscribe some receivers for some signals unsuscribe just for cb1_sig verify only cb1_sig receivers were unsubscribed unsuscribe all signals verify all receivers were unsubscribed | 2,081 | en | 0.924985 |
"""Various functions that interact with Slack, e.g. posting messages."""
import asyncio
import logging
import socket
from pathlib import Path
from typing import Union, Optional
from slack_sdk.errors import SlackApiError
from lsw_slackbot.plots import plot_resource_use
from lsw_slackbot.resources import current_memory_fraction, _get_resource_usage_dataframe
from lsw_slackbot.util import string_time
async def _send_message(client, channel: str, message: str):
    """Sends a message to a channel, with basic logging & error handling.

    Args:
        client: An async Slack web client (slack_sdk style).
        channel: Channel ID or name to post to.
        message: Message text to send.
    """
    try:
        await client.chat_postMessage(channel=channel, text=message)
    # Handle various different errors, *some* of which are non-critical...
    except SlackApiError as e:
        # API-level failure reported by Slack itself (bad channel, auth, ...).
        logging.exception(f"error from slack API when trying to send message: {e.response['error']}")
        print("Encountered SlackApiError when trying to send message (see logs.)")
    except AttributeError:
        # NOTE(review): presumably guards against a known slack_sdk failure
        # mode -- confirm which call raises before relying on this.
        logging.exception("suspected issue in Slack API when trying to send message. This bug has occured before!")
        print("Encountered AttributeError when trying to send message (see logs.)")
async def _send_file(client, channel: str, file: Union[Path, str], title):
    """Sends a file to a channel, with basic logging & error handling.

    Args:
        client: An async Slack web client (slack_sdk style).
        channel: Channel ID or name to upload to.
        file: File to upload, as a ``pathlib.Path`` or a path string.
        title: Title shown for the upload in Slack.
    """
    if isinstance(file, Path):
        # files_upload expects a filename string, not a Path object.
        file = str(file.absolute())
    try:
        await client.files_upload(channels=channel, file=file, title=title)
    # Handle various different errors, *some* of which are non-critical...
    except SlackApiError as e:
        logging.exception(f"error from Slack API when trying to upload file: {e.response['error']}")
        print("Encountered SlackApiError when trying to upload file (see logs.)")
    except AttributeError:
        # NOTE(review): presumably guards against a known slack_sdk failure
        # mode -- confirm which call raises before relying on this.
        logging.exception("suspected issue in Slack API when trying to upload file. This bug has occured before!")
        print("Encountered AttributeError when trying to upload file (see logs.)")
async def hello_world(client, channel: str):
    """Basic function to post an init message to a channel.

    Args:
        client: An async Slack web client (slack_sdk style).
        channel: Channel ID or name to post the startup message to.
    """
    # Todo: it would be really cool if hello_world also printed the latest commit message.
    # This could be done by running the command `git log -1` from Python?
    # See https://stackoverflow.com/questions/7293008/display-last-git-commit-comment
    logging.info(f"Saying hello world in {channel}!")
    system_name = socket.gethostname()
    await _send_message(
        client, channel, f"Server time & date: {string_time()}\nApp is running on system {system_name}.")
async def send_resource_use_plot(client, channel: str, plot_kwargs: dict, title: Optional[str] = None):
    """Generate a resource-usage plot and upload it to *channel*.

    Args:
        client: An async Slack web client (slack_sdk style).
        channel: Channel ID or name to upload the plot to.
        plot_kwargs: Keyword arguments forwarded to ``plot_resource_use``.
        title: Optional title; a generation timestamp is always appended.
    """
    stamp = string_time()
    title = (f"Resource usage plot generated at {stamp}" if title is None
             else f"{title} (plot generated at {stamp})")
    # Render the figure first...
    logging.info("Generating a resource usage plot")
    logging.debug(f"plot kwargs: {plot_kwargs}")
    plot_path = await plot_resource_use(**plot_kwargs)
    # ... then ship it off to Slack.
    logging.info(f"Sending to Slack in channel {channel}")
    await _send_file(client, channel, plot_path, title)
_LAST_MEMORY_FRACTION = 0.0  # memory fraction seen by the previous check_memory() call


async def check_memory(client, channel: str, memory_warn_fraction=0.8, sleep_time=3600):
    """Quick function for checking current server memory and sending a warning to a desired channel if it's
    too high.

    Args:
        client: An async Slack web client handed on to ``_send_message``.
        channel: Channel to post the warning in.
        memory_warn_fraction: Usage fraction above which a warning is sent.
        sleep_time: Seconds to sleep after a warning, to avoid spamming.
    """
    global _LAST_MEMORY_FRACTION  # Sorry for using global variables =(
    current_usage = current_memory_fraction()
    # Only warn if we didn't warn before
    if _LAST_MEMORY_FRACTION < memory_warn_fraction:
        if current_usage > memory_warn_fraction:
            # Firstly, prioritise sending a basic warning
            await _send_message(client, channel, f"WARNING: current memory usage at {current_usage:.2%}!")
            # Next, grab info on currently running threads
            thread_df = await _get_resource_usage_dataframe(measurement_time=1.0)
            thread_df = thread_df.sort_values("memory")
            # ... and format it into something we can send
            message = ["Users with something currently running:"]
            for i, a_row in thread_df.iterrows():
                message.append(f"{a_row.name}: {a_row['cpu_percent']:.2f}% CPU "
                               f"-- {a_row['memory']:.2f} GB"
                               f"-- {a_row['threads']} threads")
            message.append(f"\n(no further warnings will be sent for a sleep period of {sleep_time/60**2:.2f} hour(s))")
            # Send it!
            await _send_message(client, channel, "\n".join(message))
            # Sleep so we don't spam the chat
            await asyncio.sleep(sleep_time)
    # NOTE(review): reconstructed indentation -- assumes the last-seen
    # fraction is refreshed on every call; confirm against upstream.
    _LAST_MEMORY_FRACTION = current_usage
| lsw_slackbot/slack.py | 4,849 | Various functions that interact with Slack, e.g. posting messages.
Handle various different errors, *some* of which are non-critical... Handle various different errors, *some* of which are non-critical... Todo: it would be really cool if hello_world also printed the latest commit message. This could be done by running the command `git log -1` from Python? See https://stackoverflow.com/questions/7293008/display-last-git-commit-comment Firstly, let's generate a plot Now, let's try and send it to slack Sorry for using global variables =( Only warn if we didn't warn before Firstly, prioritise sending a basic warning Next, grab info on currently running threads ... and format it into something we can send Send it! Sleep so we don't spam the chat | 756 | en | 0.877996 |
from contextlib import contextmanager
import platform
import shlex
from subprocess import PIPE, Popen
from shutil import which
class ShellCommandResult(tuple):
    """
    The result of a :func:`coalib.misc.run_shell_command` call.

    Behaves exactly like the ``(stdout, stderr)`` tuple returned from
    ``subprocess.Popen.communicate`` -- so existing tuple-unpacking
    callers keep working -- while additionally exposing the process
    return ``.code``:

    >>> result = ShellCommandResult(0, 'out', 'err')
    >>> result == ('out', 'err')
    True
    >>> result.code
    0
    """

    def __new__(cls, code, stdout, stderr):
        """
        Builds the underlying ``(stdout, stderr)`` tuple; ``code`` is
        consumed by ``__init__``.
        """
        return super().__new__(cls, (stdout, stderr))

    def __init__(self, code, stdout, stderr):
        """
        Remembers the process return ``code``.
        """
        self.code = code
@contextmanager
def run_interactive_shell_command(command, **kwargs):
    """
    Runs a single command in shell and provides stdout, stderr and stdin
    streams.

    This function creates a context manager that sets up the process (using
    ``subprocess.Popen()``), returns to caller and waits for process to exit
    on leaving.

    By default the process is opened in ``universal_newlines`` mode and
    creates pipes for all streams (stdout, stderr and stdin) using
    ``subprocess.PIPE`` special value. These pipes are closed automatically,
    so if you want to get the contents of the streams you should retrieve
    them before the context manager exits. Custom streams provided via
    *kwargs* are not closed.

    :param command: The command to run on shell. This parameter can either
                    be a sequence of arguments that are directly passed to
                    the process or a string. A string gets splitted
                    beforehand using ``shlex.split()``. If providing
                    ``shell=True`` as a keyword-argument, no
                    ``shlex.split()`` is performed and the command string
                    goes directly to ``subprocess.Popen()``.
    :param kwargs:  Additional keyword arguments to pass to
                    ``subprocess.Popen`` that are used to spawn the process.
    :return:        A context manager yielding the process started from the
                    command.
    """
    if isinstance(command, str):
        # Only split plain command strings when not running through a shell;
        # with ``shell=True`` the string must reach Popen unmodified. The
        # previous unconditional ``list(command)`` fallback exploded such
        # strings into single characters, breaking shell invocations.
        if not kwargs.get('shell', False):
            command = shlex.split(command)
    else:
        command = list(command)
    if platform.system() == 'Windows' and not isinstance(command, str):  # pragma: no cover
        # subprocess doesn't implicitly look for .bat and .cmd scripts when
        # running commands under Windows
        command[0] = which(command[0])
    args = {'stdout': PIPE,
            'stderr': PIPE,
            'stdin': PIPE,
            'universal_newlines': True}
    args.update(kwargs)
    process = Popen(command, **args)
    try:
        yield process
    finally:
        # Close only the pipes we created ourselves; user-supplied streams
        # are left open for the caller to manage.
        if args['stdout'] is PIPE:
            process.stdout.close()
        if args['stderr'] is PIPE:
            process.stderr.close()
        if args['stdin'] is PIPE:
            process.stdin.close()
        process.wait()
def run_shell_command(command, stdin=None, **kwargs):
    """
    Runs a single command in shell and returns the read stdout and stderr data.

    Thin blocking wrapper around ``run_interactive_shell_command()``: it
    spawns the process, feeds it *stdin* via ``communicate()``, waits for it
    to exit and collects both output streams.

    :param command: Sequence of arguments or a command string (split with
                    ``shlex.split()`` unless ``shell=True`` is passed).
    :param stdin:   Initial input to send to the process.
    :param kwargs:  Additional keyword arguments forwarded to
                    ``subprocess.Popen``.
    :return:        A ``ShellCommandResult`` -- a ``(stdout, stderr)`` tuple
                    that also carries the return ``.code``.
    """
    with run_interactive_shell_command(command, **kwargs) as process:
        stdout, stderr = process.communicate(stdin)
    return ShellCommandResult(process.returncode, stdout, stderr)
def get_shell_type():  # pragma: no cover
    """
    Finds the current shell type based on the outputs of common pre-defined
    variables in them. This is useful to identify which sort of escaping
    is required for strings.

    :return: The shell type. This can be either "powershell" if Windows
             Powershell is detected, "cmd" if command prompt is been
             detected or "sh" if it's neither of these.
    """
    # Powershell defines ``$host``; in a console its name is 'ConsoleHost'.
    host_probe = run_shell_command('echo $host.name', shell=True)[0]
    if host_probe.strip() == 'ConsoleHost':
        return 'powershell'
    # cmd.exe echoes ``$0`` literally, whereas POSIX shells expand it.
    zero_probe = run_shell_command('echo $0', shell=True)[0]
    return 'cmd' if zero_probe.strip() == '$0' else 'sh'
| venv/lib/python3.5/site-packages/coalib/misc/Shell.py | 5,916 | The result of a :func:`coalib.misc.run_shell_command` call.
It is based on a ``(stdout, stderr)`` string tuple like it is returned
form ``subprocess.Popen.communicate`` and was originally returned from
:func:`coalib.misc.run_shell_command`. So it is backwards-compatible.
It additionally stores the return ``.code``:
>>> process = Popen(['python', '-c',
... 'import sys; print(sys.stdin.readline().strip() +'
... ' " processed")'],
... stdin=PIPE, stdout=PIPE, stderr=PIPE,
... universal_newlines=True)
>>> stdout, stderr = process.communicate(input='data')
>>> stderr
''
>>> result = ShellCommandResult(process.returncode, stdout, stderr)
>>> result[0]
'data processed\n'
>>> result[1]
''
>>> result.code
0
Stores the return `code`.
Creates the basic tuple from `stdout` and `stderr`.
Finds the current shell type based on the outputs of common pre-defined
variables in them. This is useful to identify which sort of escaping
is required for strings.
:return: The shell type. This can be either "powershell" if Windows
Powershell is detected, "cmd" if command prompt is been
detected or "sh" if it's neither of these.
Runs a single command in shell and provides stdout, stderr and stdin
streams.
This function creates a context manager that sets up the process (using
``subprocess.Popen()``), returns to caller and waits for process to exit on
leaving.
By default the process is opened in ``universal_newlines`` mode and creates
pipes for all streams (stdout, stderr and stdin) using ``subprocess.PIPE``
special value. These pipes are closed automatically, so if you want to get
the contents of the streams you should retrieve them before the context
manager exits.
>>> with run_interactive_shell_command(["echo", "TEXT"]) as p:
... stdout = p.stdout
... stdout_text = stdout.read()
>>> stdout_text
'TEXT\n'
>>> stdout.closed
True
Custom streams provided are not closed except of ``subprocess.PIPE``.
>>> from tempfile import TemporaryFile
>>> stream = TemporaryFile()
>>> with run_interactive_shell_command(["echo", "TEXT"],
... stdout=stream) as p:
... stderr = p.stderr
>>> stderr.closed
True
>>> stream.closed
False
:param command: The command to run on shell. This parameter can either
be a sequence of arguments that are directly passed to
the process or a string. A string gets splitted beforehand
using ``shlex.split()``. If providing ``shell=True`` as a
keyword-argument, no ``shlex.split()`` is performed and the
command string goes directly to ``subprocess.Popen()``.
:param kwargs: Additional keyword arguments to pass to
``subprocess.Popen`` that are used to spawn the process.
:return: A context manager yielding the process started from the
command.
Runs a single command in shell and returns the read stdout and stderr data.
This function waits for the process (created using ``subprocess.Popen()``)
to exit. Effectively it wraps ``run_interactive_shell_command()`` and uses
``communicate()`` on the process.
See also ``run_interactive_shell_command()``.
:param command: The command to run on shell. This parameter can either
be a sequence of arguments that are directly passed to
the process or a string. A string gets splitted beforehand
using ``shlex.split()``.
:param stdin: Initial input to send to the process.
:param kwargs: Additional keyword arguments to pass to
``subprocess.Popen`` that is used to spawn the process.
:return: A tuple with ``(stdoutstring, stderrstring)``.
pragma: no cover subprocess doesn't implicitly look for .bat and .cmd scripts when running commands under Windows pragma: no cover | 3,889 | en | 0.768537 |
#!/usr/bin/env python
import numpy as np
def initialize_hyper_parameters(layer_acts, learning_rate):
    """
    Initialize parameters for different levels of the network

    Arguments:
    layer_acts -- python array (list) containing the activation functions of each layer in the network
    learning_rate -- float value used as constant for gradient descent

    Returns:
    hyper_parameters -- python dictionary containing hyper_parameters (can be further extended)
    """
    # Layers are numbered from 1, matching the weight naming W1, W2, ...
    activations = {index + 1: act for index, act in enumerate(layer_acts)}
    return {
        "activations": activations,
        "learning_rate": learning_rate,
    }
def test_initialize_hyper_parameters():
    """Smoke-test initialize_hyper_parameters() with a three-layer setup."""
    print("\033[92m" + "\nTest initialize_hyper_parameters() ..." + "\033[0m")
    layer_acts = ["relu", "relu", "sigmoid"]
    learning_rate = 0.0075
    hyper_parameters = initialize_hyper_parameters(layer_acts, learning_rate)
    print(hyper_parameters["activations"])
    # Layers are keyed from 1; the first layer uses relu.
    assert len(hyper_parameters["activations"]) == 3
    assert hyper_parameters["activations"][1] == "relu"
    print("\033[92m" + "... end test" + "\033[0m")
def initialize_parameters(layer_dims):
    """
    Initialize parameters for different levels of the network

    Arguments:
    layer_dims -- python array (list) containing the dimensions of each layer in the network

    Returns:
    parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL", ...:
                    Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
                    bl -- bias vector of shape (layer_dims[l], 1)
    """
    # Fixed seed keeps the initialization reproducible across runs.
    np.random.seed(1)
    parameters = {}
    for layer in range(1, len(layer_dims)):
        fan_out, fan_in = layer_dims[layer], layer_dims[layer - 1]
        weights = np.random.randn(fan_out, fan_in) * 0.01
        biases = np.zeros((fan_out, 1))
        assert weights.shape == (fan_out, fan_in)
        assert biases.shape == (fan_out, 1)
        parameters['W' + str(layer)] = weights
        parameters['b' + str(layer)] = biases
    return parameters
def test_initialize_parameters():
    """Checks initialize_parameters() against known values for seed 1."""
    print("\n" + "\033[92m" + "Test initialize_parameters() ..." + "\033[0m")
    np.random.seed(1)
    parameters = initialize_parameters([3,2,1])
    print("W1 = " + str(parameters["W1"]))
    print("b1 = " + str(parameters["b1"]))
    print("W2 = " + str(parameters["W2"]))
    print("b2 = " + str(parameters["b2"]))
    # Expected values are the first draws of np.random.randn under seed 1.
    W1 = parameters["W1"]
    W1_expected = np.array([[0.01624345,-0.00611756,-0.00528172],[-0.01072969,0.00865408,-0.02301539]])
    assert np.allclose(W1, W1_expected, rtol=1e-05, atol=1e-06)
    b1 = parameters["b1"]
    b1_expected = np.array([[0.],[0.]])
    assert np.allclose(b1, b1_expected, rtol=1e-05, atol=1e-06)
    W2 = parameters["W2"]
    W2_expected = np.array([[0.01744812, -0.00761207]])
    assert np.allclose(W2, W2_expected, rtol=1e-05, atol=1e-06)
    b2 = parameters["b2"]
    b2_expected = np.array([[ 0.]])
    assert np.allclose(b2, b2_expected, rtol=1e-05, atol=1e-06)
    print("\033[92m" + "... end test" + "\033[0m")
if __name__ == "__main__":
    # Run the self-tests when executed as a script.
    test_initialize_hyper_parameters()
    test_initialize_parameters()
| utils/init_parameters.py | 3,319 | Initialize parameters for different levels of the network
Arguments:
layer_acts -- python array (list) containing the activation functions of each layer in the network
learning_rate -- float value used as constant for gradient descent
Returns:
hyper_parameters -- python dictionary containing hyper_parameters (can be further extended)
Initialize parameters for different levels of the network
Arguments:
layer_dims -- python array (list) containing the dimensions of each layer in the network
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL", ...:
Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
bl -- bias vector of shape (layer_dims[l], 1)
!/usr/bin/env python number of layers in the network number of layers in the network | 825 | en | 0.551498 |
"""
Code for particle tracking, designed for ROMS output. This new version
makes extensive use of nearest-neighbor KDTree algorithms for interpolation.
This results in significantly (36x) faster runtimes compared with the old version.
PERFORMANCE: about 3 minutes per day for a 3D cas6 experiment with 10k particles.
NOTE: You have to have run make_KDTrees.py for the grid (e.g. cas6) before running.
NOTE: There is some issue, perhaps with garbage collection, which causes
the loading of NetCDF files to happen slower after running a few times
interactively from ipython. It appears that this can be avoided by running
from the terminal as: python tracker.py [args].
This program is a driver where you specify:
- an experiment (ROMS run + release locations + other choices)
- a release or set of releases within that experiment (start day, etc.)
The main argument you provide is -exp, which is the experiment name, and
is used by experiments.get_exp_info() and .get_ic() to get the gtagex and initial particle
locations. Other possible commmand line arguments and their defaults
are explained in the argparse section below.
NOTE: To improve usefulness for people other than me, this driver will
first look for:
- LiveOcean_user/tracker/user_trackfun.py
before loading my versions.
This allows you to create your own modifications to the tracking
(e.g. for diurnal depth behavior) while still being able to use git pull to update the main code.
It can be run on its own, or with command line arguments to facilitate
large, automated jobs, for example in python:
Examples:
python tracker.py -clb True
the same command, with all the argmuents typed, instead of getting the as defaults:
python tracker.py -gtx cas6_v3_lo8b -ro 2 -d 2019.07.04 -exp jdf0 -clb True
"""
import sys
from datetime import datetime, timedelta
from time import time
import argparse
import numpy as np
from lo_tools import Lfun, zfun
Ldir = Lfun.Lstart()
from importlib import reload
pth = Ldir['LOu'] / 'tracker'
if str(pth) not in sys.path:
sys.path.append(str(pth))
import experiments as exp
reload(exp)
import trackfun_nc as tfnc
reload(tfnc)
# The import of trackfun or user_trackfun is done later in this program,
# about 100 lines down.
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# command line arguments, can be input in any order
parser = argparse.ArgumentParser()
# Set the experiment name
# (details set in experiments.py, or, if it exists, user_experiments.py)
parser.add_argument('-gtx', '--gtagex', default='cas6_v0_live', type=str)
parser.add_argument('-ro', '--roms_out_num', default=2, type=int)
# 1 = Ldir['roms_out1'], etc.
# this is the first starting day
parser.add_argument('-d', '--date_string', default='2021.10.15', type=str)
parser.add_argument('-exp', '--exp_name', default='elb', type=str)
parser.add_argument('-clb', '--clobber', default=False, type=zfun.boolean_string)
# overwrite existing output folder if clobber == True
parser.add_argument('-sub_tag', default='', type=str)
# append an optional tag to the end of the output folder name
# These are False unless the flags are used with the argument True
# so if you do NOT use these flags the run will be:
# - trapped to the surface
# - no vertical turbulent diffusion
parser.add_argument('-3d', default=False, type=zfun.boolean_string) # do 3d tracking
parser.add_argument('-laminar', default=False, type=zfun.boolean_string) # no turbulence
parser.add_argument('-no_advection', default=False, type=zfun.boolean_string) # no advection
parser.add_argument('-sink', default=0, type=float) # particle sinking speed (m per day, e.g. 40)
# windage = a small number: 0 <= windage << 1 (e.g. 0.03)
# fraction of windspeed added to advection, only for 3d=False
parser.add_argument('-wnd', '--windage', default=0, type=float)
# You can make multiple releases using:
# number_of_start_days > 1 & days_between_starts, and which hour (UTC) to start on
parser.add_argument('-nsd', '--number_of_start_days', default=1, type=int)
parser.add_argument('-dbs', '--days_between_starts', default=1, type=int)
parser.add_argument('-dtt', '--days_to_track', default=1, type=int)
parser.add_argument('-sh', '--start_hour', default=0, type=int)
# number of divisions to make between saves for the integration
# e.g. if ndiv = 12 and we have hourly saves, we use a 300 sec step
# for the integration. 300 s seems like a good default value,
# based on Banas et al. (2009, CSR RISE paper).
parser.add_argument('-ndiv', default=12, type=int)
parser.add_argument('-sph', default=1, type=int)
# sph = saves per hour, a new argument to allow more frequent writing of output.
args = parser.parse_args()
# TR is the master dict of run settings, passed to trackfun and archived as CSV below.
TR = args.__dict__
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# set where to look for model output
if args.roms_out_num == 0:
    TR['roms_out'] = Ldir['roms_out']
elif args.roms_out_num > 0:
    TR['roms_out'] = Ldir['roms_out' + str(args.roms_out_num)]
# set dependent and default fields
TR['turb'] = False
# make sure sph is no greater than ndiv
TR['sph'] = np.min((TR['sph'],TR['ndiv']))
# overrides
if TR['3d']:
    TR['windage'] = 0
    TR['turb'] = True # default is that 3d is always turbulent
if TR['laminar']:
    TR['turb'] = False
# get experiment info (gtagex is assumed to be of the form gridname_tag_ex_name)
TR['gridname'], TR['tag'], TR['ex_name'] = TR['gtagex'].split('_')
# pass some info to Ldir
Ldir['gtagex'] = TR['gtagex']
Ldir['roms_out'] = TR['roms_out']
# get the full path to a valid history file
fn00 = Ldir['roms_out'] / TR['gtagex'] / ('f' + TR['date_string']) / 'ocean_his_0001.nc'
TR['fn00'] = fn00
# set the name of the output folder
out_name = TR['exp_name']
# modify the output folder name, based on other choices,
# so the folder name encodes the run configuration
if TR['3d']:
    out_name += '_3d'
elif not TR['3d']:
    out_name += '_surf'
if TR['laminar']:
    out_name += '_laminar'
if TR['windage'] > 0:
    out_name += '_wind' + str(int(100*TR['windage']))
if TR['start_hour'] > 0:
    out_name += '_sh' + str(int(TR['start_hour']))
if TR['sink'] > 0:
    out_name += '_sink' + str(int(TR['sink']))
if TR['no_advection'] == True:
    out_name += '_nadv'
if TR['ndiv'] != 12: # only mention ndiv if it is NOT 12
    out_name += '_ndiv' + str(TR['ndiv'])
if len(TR['sub_tag']) > 0:
    out_name += '_' + TR['sub_tag']
# make the list of start days (datetimes) for separate releases
idt_list = []
dt = datetime.strptime(TR['date_string'], '%Y.%m.%d')
for nic in range(TR['number_of_start_days']):
    idt_list.append(dt)
    dt = dt + timedelta(TR['days_between_starts'])
# make the output directory (empty)
outdir0 = Ldir['LOo'] / 'tracks'
outdir1 = out_name
outdir = outdir0 / outdir1
if outdir.is_dir():
    if args.clobber:
        pass # continue and overwrite if clobber is True
    else:
        # refuse to overwrite an existing run unless -clb True is given
        print('Warning: output directory exists - rename if you want to keep it!!')
        print('-- tracker run not started --')
        sys.exit()
Lfun.make_dir(outdir, clean=True)
print(50*'*' + '\nWriting to ' + str(outdir))
sys.stdout.flush()
# Write some info to outdir0 for use by trackfun.py
Lfun.dict_to_csv(TR, outdir0 / 'exp_info.csv')
# and write the same info to outdir as part of the archived run output
Lfun.dict_to_csv(TR, outdir / 'exp_info.csv')
# Load the trackfun module.
# NOTE: we have to load this module AFTER we write [outdir0]/exp_info.csv
# because it uses that information to decide which KDTrees to load. Crude.
if (Ldir['LOu'] / 'tracker' / 'user_trackfun.py').is_file():
    # a user-supplied trackfun takes precedence over the stock one
    sys.path.append(str(Ldir['LOu'] / 'tracker'))
    import user_trackfun as tfun
else:
    import trackfun as tfun
reload(tfun)
# get the initial particle location vectors
EI = exp.get_exp_info(TR['exp_name'])
plon00, plat00, pcs00 = exp.get_ic(EI, TR['fn00'])
# step through the releases, one for each start day
write_grid = True
for idt0 in idt_list:
    tt0 = time() # monitor integration time
    # name the release file by start day
    idt0_str = datetime.strftime(idt0,'%Y.%m.%d')
    outname = ('release_' + idt0_str + '.nc')
    print('-- ' + outname)
    sys.stdout.flush()
    out_fn = outdir / outname
    # we do the calculation in one-day segments, but write complete
    # output for a release to a single NetCDF file.
    # NOTE: the dict P carries the particle state from one day to the next.
    for nd in range(TR['days_to_track']):
        # get or replace the history file list for this day
        idt = idt0 + timedelta(days=nd)
        idt_str = datetime.strftime(idt,'%Y.%m.%d')
        print(' - working on ' + idt_str)
        sys.stdout.flush()
        fn_list = tfun.get_fn_list(idt, Ldir)
        # write the grid file (once per experiment) for plotting
        if write_grid == True:
            g_infile = fn_list[0]
            g_outfile = outdir / 'grid.nc'
            tfnc.write_grid(g_infile, g_outfile)
            write_grid = False
        # DO THE TRACKING
        if nd == 0: # first day
            # set IC from the experiment's initial locations
            plon0 = plon00.copy()
            plat0 = plat00.copy()
            pcs0 = pcs00.copy()
            # do the tracking
            if TR['start_hour'] > 0:
                fn_list = fn_list[TR['start_hour']:]
            P = tfun.get_tracks(fn_list, plon0, plat0, pcs0, TR, trim_loc=True)
            # save the results to NetCDF
            tfnc.start_outfile(out_fn, P)
        else: # subsequent days
            # set IC from the last saved position of the previous day
            plon0 = P['lon'][-1,:]
            plat0 = P['lat'][-1,:]
            pcs0 = P['cs'][-1,:]
            # do the tracking
            P = tfun.get_tracks(fn_list, plon0, plat0, pcs0, TR)
            tfnc.append_to_outfile(out_fn, P)
    print(' - Took %0.1f sec for %s day(s)' %
        (time() - tt0, str(TR['days_to_track'])))
    print(50*'=')
print(50*'*' + '\nWrote to ' + str(outdir))
| tracker/tracker/user_tracker.py | 9,682 | Code for particle tracking, designed for ROMS output. This new version
makes extensive use of nearest-neighbor KDTree algorithms for interpolation.
This results in significantly (36x) faster runtimes compared with old version.
PERFORMANCE: about 3 minutes per day for a 3D cas6 experiment with 10k particles.
NOTE: You have to have run make_KDTrees.py for the grid (e.g. cas6) before running.
NOTE: There is some issue, perhaps with garbage collection, which causes
the loading of NetCDF files to happen slower after running a few times
interactively from ipython. It appears that this can be avoided by running
from the terminal as: python tracker.py [args].
This program is a driver where you specify:
- an experiment (ROMS run + release locations + other choices)
- a release or set of releases within that experiment (start day, etc.)
The main argument you provide is -exp, which is the experiment name, and
is used by experiments.get_exp_info() and .get_ic() to get the gtagex and initial particle
locations. Other possible command line arguments and their defaults
are explained in the argparse section below.
NOTE: To improve usefulness for people other than me, this driver will
first look for:
- LiveOcean_user/tracker/user_trackfun.py
before loading my versions.
This allows you to create your own modifications to the tracking
(e.g. for diurnal depth behavior) while still being able to use git pull to update the main code.
It can be run on its own, or with command line arguments to facilitate
large, automated jobs, for example in python:
Examples:
python tracker.py -clb True
the same command, with all the arguments typed, instead of getting them as defaults:
python tracker.py -gtx cas6_v3_lo8b -ro 2 -d 2019.07.04 -exp jdf0 -clb True
The import of trackfun or user_trackfun is done later in this program, about 100 lines down. ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ command line arguments, can be input in any order Set the experiment name (details set in experiments.py, or, if it exists, user_experiments.py) 1 = Ldir['roms_out1'], etc. this is the first starting day overwrite existing output folder if clobber == True append an optional tag to the end of the output folder name These are False unless the flags are used with the argument True so if you do NOT use these flags the run will be: - trapped to the surface - no vertical turbulent diffusion do 3d tracking no turbulence no advection particle sinking speed (m per day, e.g. 40) windage = a small number: 0 <= windage << 1 (e.g. 0.03) fraction of windspeed added to advection, only for 3d=False You can make multiple releases using: number_of_start_days > 1 & days_between_starts, and which hour (UTC) to start on number of divisions to make between saves for the integration e.g. if ndiv = 12 and we have hourly saves, we use a 300 sec step for the integration. 300 s seems like a good default value, based on Banas et al. (2009, CSR RISE paper). sph = saves per hour, a new argument to allow more frequent writing of output. ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ set where to look for model output set dependent and default fields make sure sph is no greater than ndiv overrides default is that 3d is always turbulent get experiment info pass some info to Ldir get the full path to a valid history file set the name of the output folder modify the output folder name, based on other choices only mention ndiv if it is NOT 12 make the list of start days (datetimes) for separate releases make the output directory (empty) continue and overwrite if clobber is True Write some info to outdir0 for use by trackfun.py and write the same info to outdir as part of the archived run output Load the trackfun module. 
NOTE: we have to load this module AFTER we write [outdir0]/exp_info.csv because it uses that information to decide which KDTrees to load. Crude. get the initial particle location vectors step through the releases, one for each start day monitor integration time name the release file by start day we do the calculation in one-day segments, but write complete output for a release to a single NetCDF file. get or replace the history file list for this day write the grid file (once per experiment) for plotting DO THE TRACKING first day set IC do the tracking save the results to NetCDF subsequent days set IC do the tracking | 4,390 | en | 0.845306 |
"""
Custom Decorators
"""
# Standard library
from functools import wraps

# Django
from django.shortcuts import redirect, reverse
from django.http import JsonResponse
from django.http import Http404
from django.utils.translation import gettext as _

# local Django
from app.modules.util.helpers import Helpers
from app.modules.core.response import Response
from app.modules.entity.option_entity import Option_Entity
def redirect_if_authenticated(function):
    """Decorator: send already-authenticated users away from guest-only views.

    If the request has a ``redirect`` query parameter, the user is sent
    there; otherwise to the admin dashboard. Anonymous requests fall
    through to the wrapped controller unchanged.
    """
    @wraps(function)
    def wrap(controller, request, *args, **kwargs):
        if request.user and request.user.is_authenticated:
            if "redirect" in request.GET:
                # NOTE(security): redirect target comes straight from the query
                # string without validation (open-redirect risk) — consider
                # django.utils.http.url_has_allowed_host_and_scheme.
                return redirect(request.GET["redirect"])
            return redirect("app.web.admin.dashboard")
        return function(controller, request, *args, **kwargs)
    return wrap
def login_if_not_authenticated(function):
    """Decorator: force anonymous users to the login page.

    The current full path is forwarded as the ``redirect`` query
    parameter so the user returns here after logging in.
    """
    @wraps(function)
    def wrap(controller, request, *args, **kwargs):
        if not request.user or not request.user.is_authenticated:
            return redirect(reverse("app.web.login") + "?redirect=" + request.get_full_path())
        return function(controller, request, *args, **kwargs)
    return wrap
def stop_request_if_authenticated(function):
    """Decorator: reject API requests made by authenticated users.

    Returns a JSON private-failure response instead of running the
    wrapped controller when the requester is logged in.
    """
    @wraps(function)
    def wrap(controller, request, *args, **kwargs):
        if request.user and request.user.is_authenticated:
            response = Response()
            return JsonResponse(response.send_private_failure([{
                "type": "error",
                "message": _("Error! Access forbidden for authenticated users.")
            }]))
        return function(controller, request, *args, **kwargs)
    return wrap
def redirect_if_not_installed(function):
    """Decorator: send visitors to the install wizard until the app is installed.

    ``get_one_by_key`` returns False when the ``app_installed`` option
    row is missing; any other value counts as installed.
    """
    @wraps(function)
    def wrap(controller, request, *args, **kwargs):
        installed = Option_Entity().get_one_by_key("app_installed") is not False
        if not installed:
            return redirect("app.web.install")
        return function(controller, request, *args, **kwargs)
    return wrap
def protect_metric_with_auth_key(function):
    """Decorator: guard the Prometheus metrics endpoint with a token.

    When a non-empty ``prometheus_token`` option is configured, the
    request must present it in the ``Authorization`` header; otherwise a
    404 is raised so the endpoint's existence is not revealed. Metric
    types other than ``prometheus`` pass through untouched.
    """
    @wraps(function)
    def wrap(controller, request, *args, **kwargs):
        if kwargs["type"] == "prometheus":
            prometheus_token = Option_Entity().get_one_by_key("prometheus_token")
            if prometheus_token.value != "" and ("HTTP_AUTHORIZATION" not in request.META or prometheus_token.value != request.META["HTTP_AUTHORIZATION"]):
                raise Http404("Host not found.")
        return function(controller, request, *args, **kwargs)
    return wrap
def stop_request_if_installed(function):
    """Decorator: block installer endpoints once the app is installed.

    Returns a JSON private-failure response instead of running the
    wrapped controller when installation has already completed.
    """
    @wraps(function)
    def wrap(controller, request, *args, **kwargs):
        installed = Option_Entity().get_one_by_key("app_installed") is not False
        if installed:
            response = Response()
            return JsonResponse(response.send_private_failure([{
                "type": "error",
                "message": _("Error! Application is already installed.")
            }]))
        return function(controller, request, *args, **kwargs)
    return wrap
def log_request_data(function):
    """Decorator: debug-log the method, URL and body of every request."""
    @wraps(function)
    def wrap(controller, request, *args, **kwargs):
        _helper = Helpers()
        _logger = _helper.get_logger(__name__)
        _logger.debug(_("Request Method: %s") % request.method)
        _logger.debug(_("Request URL: %s") % request.path)
        _logger.debug(_("Request Body: %s") % request.body)
        return function(controller, request, *args, **kwargs)
    return wrap
| app/modules/core/decorators.py | 3,339 | Custom Decorators
Django local Django | 39 | en | 0.156442 |
from datetime import datetime, timedelta
from django.test import TestCase
from mock import patch
from corehq.apps.domain.models import Domain
from corehq.apps.hqcase.utils import update_case
from corehq.apps.sms.mixin import PhoneNumberInUseException
from corehq.apps.sms.models import (
PhoneNumber,
SQLMobileBackend,
SQLMobileBackendMapping,
)
from corehq.apps.sms.tasks import (
delete_phone_numbers_for_owners,
sync_case_phone_number,
)
from corehq.apps.sms.tests.util import delete_domain_phone_numbers
from corehq.apps.users.models import CommCareUser, WebUser
from corehq.apps.users.tasks import tag_cases_as_deleted_and_remove_indices
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.tests.utils import run_with_all_backends
from corehq.form_processor.utils import is_commcarecase
from corehq.messaging.smsbackends.test.models import SQLTestSMSBackend
from corehq.util.test_utils import create_test_case
class PhoneNumberCacheClearTestCase(TestCase):
    """Verifies that PhoneNumber lookup caches are invalidated on save/delete."""
    def assertNoMatch(self, phone_search, suffix_search, owner_id_search):
        # None of the three lookup paths should find anything.
        self.assertIsNone(PhoneNumber.get_two_way_number(phone_search))
        self.assertIsNone(PhoneNumber.get_two_way_number_by_suffix(suffix_search))
        self.assertEqual(PhoneNumber.by_owner_id(owner_id_search), [])
    def assertPhoneNumbersEqual(self, phone1, phone2):
        # Compare every model field, not just the primary key.
        for field in phone1._meta.fields:
            self.assertEqual(getattr(phone1, field.name), getattr(phone2, field.name))
    def assertMatch(self, match, phone_search, suffix_search, owner_id_search):
        # All three lookup paths should resolve to `match`.
        lookedup = PhoneNumber.get_two_way_number(phone_search)
        self.assertPhoneNumbersEqual(match, lookedup)
        lookedup = PhoneNumber.get_two_way_number_by_suffix(suffix_search)
        self.assertPhoneNumbersEqual(match, lookedup)
        [lookedup] = PhoneNumber.by_owner_id(owner_id_search)
        self.assertPhoneNumbersEqual(match, lookedup)
    def _test_cache_clear(self, refresh_each_time=True):
        """
        A test to make sure that the cache clearing is working as expected.
        This test gets run twice using different values for refresh_each_time.
        This makes sure that the mechanism used for clearing the cache works
        whether you're updating a document you just saved or getting a document
        fresh from the database and updating it.
        """
        created = PhoneNumber(
            domain='phone-number-test',
            owner_doc_type='CommCareCase',
            owner_id='fake-owner-id1',
            phone_number='99912341234',
            backend_id=None,
            ivr_backend_id=None,
            verified=True,
            pending_verification=False,
            is_two_way=True,
            contact_last_modified=datetime.utcnow()
        )
        created.save()
        self.assertNoMatch('99952345234', '52345234', 'fake-owner-id2')
        self.assertMatch(created, '99912341234', '12341234', 'fake-owner-id1')
        # Update Phone Number
        if refresh_each_time:
            created = PhoneNumber.objects.get(pk=created.pk)
        created.phone_number = '99952345234'
        created.save()
        self.assertNoMatch('99912341234', '12341234', 'fake-owner-id2')
        self.assertMatch(created, '99952345234', '52345234', 'fake-owner-id1')
        # Update Owner Id
        if refresh_each_time:
            created = PhoneNumber.objects.get(pk=created.pk)
        created.owner_id = 'fake-owner-id2'
        created.save()
        self.assertNoMatch('99912341234', '12341234', 'fake-owner-id1')
        self.assertMatch(created, '99952345234', '52345234', 'fake-owner-id2')
        # Deleting the entry should clear all lookup caches as well.
        created.delete()
        self.assertNoMatch('99952345234', '52345234', 'fake-owner-id2')
    def test_cache_clear_with_refresh(self):
        self._test_cache_clear(refresh_each_time=True)
    def test_cache_clear_without_refresh(self):
        self._test_cache_clear(refresh_each_time=False)
class CaseContactPhoneNumberTestCase(TestCase):
    """Tests syncing of case contact_* properties to PhoneNumber entries."""
    def setUp(self):
        self.domain = 'case-phone-number-test'
    def tearDown(self):
        delete_domain_phone_numbers(self.domain)
    def set_case_property(self, case, property_name, value):
        # Update one case property and return the freshly-loaded case.
        update_case(self.domain, case.case_id, case_properties={property_name: value})
        return CaseAccessors(self.domain).get_case(case.case_id)
    def get_case_phone_number(self, case):
        return case.get_phone_number()
    def assertPhoneNumberDetails(self, case, phone_number, sms_backend_id, ivr_backend_id,
            verified, pending_verification, is_two_way, pk=None):
        # Assert every field of the case's PhoneNumber entry; pass pk to also
        # assert the same row was updated in place (not recreated).
        v = self.get_case_phone_number(case)
        self.assertEqual(v.domain, case.domain)
        self.assertEqual(v.owner_doc_type, case.doc_type)
        self.assertEqual(v.owner_id, case.case_id)
        self.assertEqual(v.phone_number, phone_number)
        self.assertEqual(v.backend_id, sms_backend_id)
        self.assertEqual(v.ivr_backend_id, ivr_backend_id)
        self.assertEqual(v.verified, verified)
        self.assertEqual(v.pending_verification, pending_verification)
        self.assertEqual(v.is_two_way, is_two_way)
        self.assertEqual(v.contact_last_modified, case.server_modified_on)
        if pk:
            self.assertEqual(v.pk, pk)
    @run_with_all_backends
    def test_case_phone_number_updates(self):
        # extra_number belongs to a different owner and must never be touched.
        extra_number = PhoneNumber.objects.create(
            domain=self.domain,
            owner_doc_type='X',
            owner_id='X',
            phone_number='999123',
            verified=True,
            pending_verification=False,
            is_two_way=True
        )
        self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
        with create_test_case(self.domain, 'participant', 'test1', drop_signals=False) as case:
            self.assertIsNone(self.get_case_phone_number(case))
            case = self.set_case_property(case, 'contact_phone_number', '99987658765')
            self.assertPhoneNumberDetails(case, '99987658765', None, None, False, False, False)
            pk = self.get_case_phone_number(case).pk
            self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
            case = self.set_case_property(case, 'contact_phone_number_is_verified', '1')
            self.assertPhoneNumberDetails(case, '99987658765', None, None, True, False, True, pk=pk)
            self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
            case = self.set_case_property(case, 'contact_phone_number', '99987698769')
            self.assertPhoneNumberDetails(case, '99987698769', None, None, True, False, True)
            pk = self.get_case_phone_number(case).pk
            self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
            case = self.set_case_property(case, 'contact_backend_id', 'sms-backend')
            self.assertPhoneNumberDetails(case, '99987698769', 'sms-backend', None, True, False, True, pk=pk)
            self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
            case = self.set_case_property(case, 'contact_ivr_backend_id', 'ivr-backend')
            self.assertPhoneNumberDetails(case, '99987698769', 'sms-backend', 'ivr-backend', True, False, True,
                pk=pk)
            self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
            # If phone entry is ahead of the case in terms of contact_last_modified, no update should happen
            v = self.get_case_phone_number(case)
            v.contact_last_modified += timedelta(days=1)
            v.save()
            with patch('corehq.apps.sms.models.PhoneNumber.save') as mock_save:
                case = self.set_case_property(case, 'contact_phone_number', '99912341234')
                self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
                mock_save.assert_not_called()
        self.assertEqual(PhoneNumber.get_two_way_number('999123'), extra_number)
    @run_with_all_backends
    def test_close_case(self):
        # Closing a case should remove its PhoneNumber entry, leaving others intact.
        extra_number = PhoneNumber.objects.create(
            domain=self.domain,
            owner_doc_type='X',
            owner_id='X',
            phone_number='999123',
            verified=True,
            pending_verification=False,
            is_two_way=True
        )
        self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
        with create_test_case(self.domain, 'participant', 'test1', drop_signals=False) as case:
            case = self.set_case_property(case, 'contact_phone_number', '99987658765')
            case = self.set_case_property(case, 'contact_phone_number_is_verified', '1')
            self.assertIsNotNone(self.get_case_phone_number(case))
            self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
            update_case(self.domain, case.case_id, close=True)
            self.assertIsNone(self.get_case_phone_number(case))
            self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
        self.assertEqual(PhoneNumber.get_two_way_number('999123'), extra_number)
    @run_with_all_backends
    def test_case_soft_delete(self):
        # Soft-deleting a case should remove its PhoneNumber entry as well.
        extra_number = PhoneNumber.objects.create(
            domain=self.domain,
            owner_doc_type='X',
            owner_id='X',
            phone_number='999123',
            verified=True,
            pending_verification=False,
            is_two_way=True
        )
        self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
        with create_test_case(self.domain, 'participant', 'test1', drop_signals=False) as case:
            case = self.set_case_property(case, 'contact_phone_number', '99987658765')
            case = self.set_case_property(case, 'contact_phone_number_is_verified', '1')
            self.assertIsNotNone(self.get_case_phone_number(case))
            self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
            tag_cases_as_deleted_and_remove_indices(self.domain, [case.case_id], '123', datetime.utcnow())
            self.assertIsNone(self.get_case_phone_number(case))
            self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
        self.assertEqual(PhoneNumber.get_two_way_number('999123'), extra_number)
    @run_with_all_backends
    def test_case_zero_phone_number(self):
        # Setting the phone number to '0' should remove the entry.
        extra_number = PhoneNumber.objects.create(
            domain=self.domain,
            owner_doc_type='X',
            owner_id='X',
            phone_number='999123',
            verified=True,
            pending_verification=False,
            is_two_way=True
        )
        self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
        with create_test_case(self.domain, 'participant', 'test1', drop_signals=False) as case:
            case = self.set_case_property(case, 'contact_phone_number', '99987658765')
            case = self.set_case_property(case, 'contact_phone_number_is_verified', '1')
            self.assertIsNotNone(self.get_case_phone_number(case))
            self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
            case = self.set_case_property(case, 'contact_phone_number', '0')
            self.assertIsNone(self.get_case_phone_number(case))
            self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
        self.assertEqual(PhoneNumber.get_two_way_number('999123'), extra_number)
    @run_with_all_backends
    def test_invalid_phone_format(self):
        # A non-numeric phone number should remove the entry.
        extra_number = PhoneNumber.objects.create(
            domain=self.domain,
            owner_doc_type='X',
            owner_id='X',
            phone_number='999123',
            verified=True,
            pending_verification=False,
            is_two_way=True
        )
        self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
        with create_test_case(self.domain, 'participant', 'test1', drop_signals=False) as case:
            case = self.set_case_property(case, 'contact_phone_number', '99987658765')
            case = self.set_case_property(case, 'contact_phone_number_is_verified', '1')
            self.assertIsNotNone(self.get_case_phone_number(case))
            self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
            case = self.set_case_property(case, 'contact_phone_number', 'xyz')
            self.assertIsNone(self.get_case_phone_number(case))
            self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
        self.assertEqual(PhoneNumber.get_two_way_number('999123'), extra_number)
    @run_with_all_backends
    def test_phone_number_already_in_use(self):
        # The first case to verify a number keeps two-way status; a later
        # case claiming the same number is stored unverified.
        self.assertEqual(PhoneNumber.count_by_domain(self.domain), 0)
        with create_test_case(self.domain, 'participant', 'test1', drop_signals=False) as case1, \
                create_test_case(self.domain, 'participant', 'test2', drop_signals=False) as case2:
            case1 = self.set_case_property(case1, 'contact_phone_number', '99987658765')
            case1 = self.set_case_property(case1, 'contact_phone_number_is_verified', '1')
            case2 = self.set_case_property(case2, 'contact_phone_number', '99987698769')
            case2 = self.set_case_property(case2, 'contact_phone_number_is_verified', '1')
            self.assertIsNotNone(self.get_case_phone_number(case1))
            self.assertIsNotNone(self.get_case_phone_number(case2))
            self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
            case2 = self.set_case_property(case2, 'contact_phone_number', '99987658765')
            self.assertIsNotNone(self.get_case_phone_number(case1))
            self.assertIsNotNone(self.get_case_phone_number(case2))
            self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
            self.assertPhoneNumberDetails(case1, '99987658765', None, None, True, False, True)
            self.assertPhoneNumberDetails(case2, '99987658765', None, None, False, False, False)
    @run_with_all_backends
    def test_multiple_entries(self):
        # Syncing should collapse duplicate entries for a case down to one,
        # while leaving other owners' entries alone.
        extra_number = PhoneNumber.objects.create(
            domain=self.domain,
            owner_doc_type='X',
            owner_id='X',
            phone_number='999124',
            verified=False,
            pending_verification=False,
            is_two_way=False
        )
        with create_test_case(self.domain, 'participant', 'test1', drop_signals=False) as case:
            case = self.set_case_property(case, 'contact_phone_number', '999124')
            case = self.set_case_property(case, 'contact_phone_number_is_verified', '1')
            case.create_phone_entry('999125')
            self.assertEqual(PhoneNumber.objects.count(), 3)
            sync_case_phone_number(case)
            self.assertEqual(PhoneNumber.objects.count(), 2)
            number1 = PhoneNumber.objects.get(pk=extra_number.pk)
            self.assertEqual(number1.owner_id, 'X')
            number2 = PhoneNumber.objects.get(owner_id=case.case_id)
            self.assertTrue(number2.verified)
            self.assertTrue(number2.is_two_way)
            self.assertFalse(number2.pending_verification)
class SQLPhoneNumberTestCase(TestCase):
def setUp(self):
self.domain = 'sql-phone-number-test'
self.domain_obj = Domain(name=self.domain)
self.domain_obj.save()
def delete_objects(self, result):
for obj in result:
# Delete and clear cache
obj.delete()
def tearDown(self):
self.delete_objects(PhoneNumber.objects.filter(domain=self.domain))
self.delete_objects(SQLMobileBackend.objects.filter(domain=self.domain))
SQLMobileBackendMapping.objects.filter(domain=self.domain).delete()
self.domain_obj.delete()
def test_backend(self):
backend1 = SQLTestSMSBackend.objects.create(
hq_api_id=SQLTestSMSBackend.get_api_id(),
is_global=False,
domain=self.domain,
name='BACKEND1'
)
backend2 = SQLTestSMSBackend.objects.create(
hq_api_id=SQLTestSMSBackend.get_api_id(),
is_global=False,
domain=self.domain,
name='BACKEND2'
)
SQLMobileBackendMapping.set_default_domain_backend(self.domain, backend1)
number = PhoneNumber(domain=self.domain, phone_number='+999123')
self.assertEqual(number.backend, backend1)
number.backend_id = backend2.name
self.assertEqual(number.backend, backend2)
number.backend_id = ' '
self.assertEqual(number.backend, backend1)
@run_with_all_backends
def test_case_owner(self):
with create_test_case(self.domain, 'participant', 'test') as case:
number = PhoneNumber(owner_doc_type='CommCareCase', owner_id=case.case_id)
owner = number.owner
self.assertTrue(is_commcarecase(owner))
self.assertEqual(owner.case_id, case.case_id)
def test_user_owner(self):
mobile_user = CommCareUser.create(self.domain, 'abc', 'def', None, None)
number = PhoneNumber(owner_doc_type='CommCareUser', owner_id=mobile_user.get_id)
owner = number.owner
self.assertTrue(isinstance(owner, CommCareUser))
self.assertEqual(owner.get_id, mobile_user.get_id)
web_user = WebUser.create(self.domain, 'ghi', 'jkl', None, None)
number = PhoneNumber(owner_doc_type='WebUser', owner_id=web_user.get_id)
owner = number.owner
self.assertTrue(isinstance(owner, WebUser))
self.assertEqual(owner.get_id, web_user.get_id)
number = PhoneNumber(owner_doc_type='X')
self.assertIsNone(number.owner)
def test_get_two_way_number(self):
number1 = PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=True,
pending_verification=False,
is_two_way=True
)
PhoneNumber.objects.create(
domain=self.domain,
owner_doc_type='X',
owner_id='X',
phone_number='999123',
verified=False,
pending_verification=False,
is_two_way=False
)
self.assertEqual(PhoneNumber.get_two_way_number('999123'), number1)
self.assertEqual(PhoneNumber.get_two_way_number('+999 123'), number1)
self.assertIsNone(PhoneNumber.get_two_way_number('999124'))
self.assertIsNone(PhoneNumber.get_number_pending_verification('999123'))
self.assertIsNone(PhoneNumber.get_number_pending_verification('999124'))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
# test cache clear on save
number1.phone_number = '999124'
number1.save()
self.assertIsNone(PhoneNumber.get_two_way_number('999123'))
self.assertEqual(PhoneNumber.get_two_way_number('999124'), number1)
self.assertIsNone(PhoneNumber.get_number_pending_verification('999123'))
self.assertIsNone(PhoneNumber.get_number_pending_verification('999124'))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
# test cache clear on delete
number1.delete()
self.assertIsNone(PhoneNumber.get_two_way_number('999123'))
self.assertIsNone(PhoneNumber.get_two_way_number('999124'))
self.assertIsNone(PhoneNumber.get_number_pending_verification('999123'))
self.assertIsNone(PhoneNumber.get_number_pending_verification('999124'))
self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
def test_get_number_pending_verification(self):
    """get_number_pending_verification returns only the pending entry,
    its cache clears on save/delete, and promotion to two-way moves the
    number from the pending lookup to the two-way lookup."""
    number1 = PhoneNumber.objects.create(
        domain=self.domain,
        owner_doc_type='X',
        owner_id='X',
        phone_number='999123',
        verified=False,
        pending_verification=True,
        is_two_way=False
    )
    # Same number but not pending: must not be returned by the lookup.
    PhoneNumber.objects.create(
        domain=self.domain,
        owner_doc_type='X',
        owner_id='X',
        phone_number='999123',
        verified=False,
        pending_verification=False,
        is_two_way=False
    )
    self.assertIsNone(PhoneNumber.get_two_way_number('999123'))
    self.assertIsNone(PhoneNumber.get_two_way_number('999124'))
    self.assertEqual(PhoneNumber.get_number_pending_verification('999123'), number1)
    # Lookup strips formatting characters ('+', space) before matching.
    self.assertEqual(PhoneNumber.get_number_pending_verification('+999 123'), number1)
    self.assertIsNone(PhoneNumber.get_number_pending_verification('999124'))
    self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
    # test cache clear on save
    number1.phone_number = '999124'
    number1.save()
    self.assertIsNone(PhoneNumber.get_two_way_number('999123'))
    self.assertIsNone(PhoneNumber.get_two_way_number('999124'))
    self.assertIsNone(PhoneNumber.get_number_pending_verification('999123'))
    self.assertEqual(PhoneNumber.get_number_pending_verification('999124'), number1)
    self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
    # test promotion to two-way
    number1.set_two_way()
    number1.set_verified()
    number1.save()
    self.assertIsNone(PhoneNumber.get_two_way_number('999123'))
    self.assertEqual(PhoneNumber.get_two_way_number('999124'), number1)
    self.assertIsNone(PhoneNumber.get_number_pending_verification('999123'))
    self.assertIsNone(PhoneNumber.get_number_pending_verification('999124'))
    self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
    # test cache clear on delete
    number1.delete()
    self.assertIsNone(PhoneNumber.get_two_way_number('999123'))
    self.assertIsNone(PhoneNumber.get_two_way_number('999124'))
    self.assertIsNone(PhoneNumber.get_number_pending_verification('999123'))
    self.assertIsNone(PhoneNumber.get_number_pending_verification('999124'))
    self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
def test_suffix_lookup(self):
    """get_two_way_number_by_suffix matches a unique trailing fragment of
    a two-way number; an ambiguous suffix returns None, and suffix entries
    track phone-number updates."""
    number1 = PhoneNumber.objects.create(
        domain=self.domain,
        owner_doc_type='X',
        owner_id='X',
        phone_number='999123',
        verified=True,
        pending_verification=False,
        is_two_way=True
    )
    number2 = PhoneNumber.objects.create(
        domain=self.domain,
        owner_doc_type='X',
        owner_id='X',
        phone_number='999223',
        verified=True,
        pending_verification=False,
        is_two_way=True
    )
    self.assertEqual(PhoneNumber.get_two_way_number_by_suffix('1 23'), number1)
    self.assertEqual(PhoneNumber.get_two_way_number_by_suffix('2 23'), number2)
    # '23' matches both numbers, so the lookup cannot pick one.
    self.assertIsNone(PhoneNumber.get_two_way_number_by_suffix('23'))
    # test update
    number1.phone_number = '999124'
    number1.save()
    number2.phone_number = '999224'
    number2.save()
    self.assertIsNone(PhoneNumber.get_two_way_number_by_suffix('1 23'))
    self.assertIsNone(PhoneNumber.get_two_way_number_by_suffix('2 23'))
    self.assertEqual(PhoneNumber.get_two_way_number_by_suffix('124'), number1)
    self.assertEqual(PhoneNumber.get_two_way_number_by_suffix('224'), number2)
def test_extensive_search(self):
    """by_extensive_search finds the entry despite leading zeros on the
    query and matches trailing fragments of the stored number."""
    number = PhoneNumber.objects.create(
        domain=self.domain,
        owner_doc_type='X',
        owner_id='X',
        phone_number='999123',
        verified=True,
        pending_verification=False,
        is_two_way=True
    )
    self.assertEqual(PhoneNumber.by_extensive_search('999123'), number)
    # Leading zeros on the query are tolerated.
    self.assertEqual(PhoneNumber.by_extensive_search('0999123'), number)
    self.assertEqual(PhoneNumber.by_extensive_search('00999123'), number)
    self.assertEqual(PhoneNumber.by_extensive_search('000999123'), number)
    # A suffix of the stored number matches too.
    self.assertEqual(PhoneNumber.by_extensive_search('123'), number)
    self.assertIsNone(PhoneNumber.by_extensive_search('999124'))
def test_by_domain(self):
    """by_domain / count_by_domain return only the entries in the given
    domain; ids_only=True yields couch_ids instead of model instances."""
    number1 = PhoneNumber.objects.create(
        domain=self.domain,
        owner_doc_type='X',
        owner_id='X',
        phone_number='999123',
        verified=True,
        pending_verification=False,
        is_two_way=True
    )
    number2 = PhoneNumber.objects.create(
        domain=self.domain,
        owner_doc_type='X',
        owner_id='X',
        phone_number='999124',
        verified=False,
        pending_verification=False,
        is_two_way=False
    )
    # Entry in a different domain: must be excluded from the results.
    number3 = PhoneNumber.objects.create(
        domain=self.domain + 'X',
        owner_doc_type='X',
        owner_id='X',
        phone_number='999124',
        verified=True,
        pending_verification=False,
        is_two_way=True
    )
    self.addCleanup(number3.delete)
    self.assertEqual(
        set(PhoneNumber.by_domain(self.domain)),
        set([number1, number2])
    )
    self.assertEqual(
        set(PhoneNumber.by_domain(self.domain, ids_only=True)),
        set([number1.couch_id, number2.couch_id])
    )
    self.assertEqual(PhoneNumber.count_by_domain(self.domain), 2)
def test_by_owner_id(self):
    """by_owner_id returns entries for an owner, its cache follows
    owner_id changes, and flag updates are visible on re-lookup."""
    number = PhoneNumber.objects.create(
        domain=self.domain,
        owner_doc_type='X',
        owner_id='owner1',
        phone_number='999123',
        verified=True,
        pending_verification=False,
        is_two_way=True
    )
    [lookup] = PhoneNumber.by_owner_id('owner1')
    self.assertEqual(lookup, number)
    # test cache clear
    number.owner_id = 'owner2'
    number.save()
    self.assertEqual(PhoneNumber.by_owner_id('owner1'), [])
    [lookup] = PhoneNumber.by_owner_id('owner2')
    self.assertEqual(lookup, number)
    # Flag changes must be reflected by a fresh lookup, not stale cache.
    number.verified = False
    number.is_two_way = False
    number.save()
    [lookup] = PhoneNumber.by_owner_id('owner2')
    self.assertFalse(lookup.verified)
    self.assertFalse(lookup.is_two_way)
def create_case_contact(self, phone_number):
    """Build a 'participant' test case carrying a verified contact number.

    Returns the context manager produced by create_test_case; the case
    fires signals (drop_signals=False) so phone-number sync runs.
    """
    case_properties = {
        'contact_phone_number': phone_number,
        'contact_phone_number_is_verified': '1',
    }
    return create_test_case(
        self.domain,
        'participant',
        'test',
        case_properties=case_properties,
        drop_signals=False
    )
@run_with_all_backends
def test_delete_phone_numbers_for_owners(self):
    """delete_phone_numbers_for_owners removes entries for exactly the
    given owner ids and leaves other owners' entries intact."""
    with self.create_case_contact('9990001') as case1, \
            self.create_case_contact('9990002') as case2, \
            self.create_case_contact('9990003') as case3:
        self.assertEqual(len(PhoneNumber.by_owner_id(case1.case_id)), 1)
        self.assertEqual(len(PhoneNumber.by_owner_id(case2.case_id)), 1)
        self.assertEqual(len(PhoneNumber.by_owner_id(case3.case_id)), 1)
        self.assertEqual(PhoneNumber.count_by_domain(self.domain), 3)
        delete_phone_numbers_for_owners([case2.case_id, case3.case_id])
        # case1's entry survives; case2/case3 entries are gone.
        self.assertEqual(len(PhoneNumber.by_owner_id(case1.case_id)), 1)
        self.assertEqual(len(PhoneNumber.by_owner_id(case2.case_id)), 0)
        self.assertEqual(len(PhoneNumber.by_owner_id(case3.case_id)), 0)
        self.assertEqual(PhoneNumber.count_by_domain(self.domain), 1)
def test_verify_uniqueness(self):
    """verify_uniqueness passes for the entry holding the number and
    raises PhoneNumberInUseException for a duplicate of it."""
    shared_fields = dict(
        domain=self.domain,
        owner_doc_type='X',
        owner_id='X',
        phone_number='999123',
        pending_verification=False,
    )
    original = PhoneNumber.objects.create(
        verified=True,
        is_two_way=True,
        **shared_fields
    )
    duplicate = PhoneNumber.objects.create(
        verified=False,
        is_two_way=False,
        **shared_fields
    )
    # Raises no exception
    original.verify_uniqueness()
    # Raises PhoneNumberInUseException
    with self.assertRaises(PhoneNumberInUseException):
        duplicate.verify_uniqueness()
class TestUserPhoneNumberSync(TestCase):
    """Saving a CommCareUser keeps its PhoneNumber rows in sync with the
    user's phone_numbers list; retiring the user removes its entries."""

    def setUp(self):
        self.domain = 'user-phone-number-test'
        self.domain_obj = Domain(name=self.domain)
        self.domain_obj.save()
        self.mobile_worker1 = CommCareUser.create(self.domain, 'mobile1', 'mobile1', None, None)
        self.mobile_worker2 = CommCareUser.create(self.domain, 'mobile2', 'mobile2', None, None)

    def tearDown(self):
        delete_domain_phone_numbers(self.domain)
        self.domain_obj.delete()

    def assertPhoneEntries(self, user, phone_numbers):
        """Assert the user's phone entries are exactly `phone_numbers`."""
        entries = user.get_phone_entries()
        self.assertEqual(len(entries), len(phone_numbers))
        self.assertEqual(set(entries.keys()), set(phone_numbers))

    def testSync(self):
        """Adding/removing numbers on save creates/deletes entries, reuses
        existing rows (same pk), and leaves other owners' entries alone."""
        # Entry owned by someone else; must survive all syncs below.
        extra_number = PhoneNumber.objects.create(
            domain=self.domain,
            owner_doc_type='X',
            owner_id='owner1',
            phone_number='999123',
            verified=True,
            pending_verification=False,
            is_two_way=True
        )
        user = self.mobile_worker1
        self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 1)
        user.phone_numbers = ['9990001']
        user.save()
        self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 2)
        self.assertPhoneEntries(user, ['9990001'])
        before = user.get_phone_entries()['9990001']
        user.phone_numbers = ['9990001', '9990002']
        user.save()
        self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 3)
        self.assertPhoneEntries(user, ['9990001', '9990002'])
        # The pre-existing row is reused, not recreated.
        after = user.get_phone_entries()['9990001']
        self.assertEqual(before.pk, after.pk)
        user.phone_numbers = ['9990002']
        user.save()
        self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 2)
        self.assertPhoneEntries(user, ['9990002'])
        self.assertEqual(PhoneNumber.get_two_way_number('999123'), extra_number)

    def testRetire(self):
        """Retiring a user deletes only that user's phone entries."""
        self.mobile_worker1.phone_numbers = ['9990001']
        self.mobile_worker1.save()
        self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 1)
        self.assertPhoneEntries(self.mobile_worker1, ['9990001'])
        self.mobile_worker2.phone_numbers = ['9990002']
        self.mobile_worker2.save()
        self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 2)
        self.assertPhoneEntries(self.mobile_worker2, ['9990002'])
        self.mobile_worker1.retire(deleted_by=None)
        self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 1)
        self.assertPhoneEntries(self.mobile_worker2, ['9990002'])
class TestGenericContactMethods(TestCase):
    """Per-contact phone-entry helpers: get_or_create, listing, deletion,
    and that a no-op user sync preserves verification state."""

    def setUp(self):
        self.domain = 'contact-phone-number-test'
        self.domain_obj = Domain(name=self.domain)
        self.domain_obj.save()
        self.mobile_worker1 = CommCareUser.create(self.domain, 'mobile1', 'mobile1', None, None)
        self.mobile_worker2 = CommCareUser.create(self.domain, 'mobile2', 'mobile2', None, None)

    def tearDown(self):
        delete_domain_phone_numbers(self.domain)
        self.domain_obj.delete()

    def testGetOrCreate(self):
        """Second call for the same number returns the same row (same pk)."""
        before = self.mobile_worker1.get_or_create_phone_entry('999123')
        self.assertEqual(before.owner_doc_type, 'CommCareUser')
        self.assertEqual(before.owner_id, self.mobile_worker1.get_id)
        self.assertEqual(before.phone_number, '999123')
        self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 1)
        after = self.mobile_worker1.get_or_create_phone_entry('999123')
        self.assertEqual(before.pk, after.pk)
        self.assertEqual(after.owner_doc_type, 'CommCareUser')
        self.assertEqual(after.owner_id, self.mobile_worker1.get_id)
        self.assertEqual(after.phone_number, '999123')
        self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 1)

    def testGetPhoneEntries(self):
        """get_phone_entries lists all of a user's numbers;
        get_two_way_numbers lists only those promoted to two-way."""
        number1 = self.mobile_worker1.get_or_create_phone_entry('999123')
        number2 = self.mobile_worker1.get_or_create_phone_entry('999124')
        self.mobile_worker1.get_or_create_phone_entry('999125')
        number4 = self.mobile_worker2.get_or_create_phone_entry('999126')
        number1.set_two_way()
        number2.set_pending_verification()
        number4.set_two_way()
        self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 4)
        entries = self.mobile_worker1.get_phone_entries()
        self.assertEqual(set(entries.keys()), set(['999123', '999124', '999125']))
        entries = self.mobile_worker1.get_two_way_numbers()
        self.assertEqual(set(entries.keys()), set(['999123']))

    def testDelete(self):
        """delete_phone_entry removes one number for one owner only."""
        self.mobile_worker1.get_or_create_phone_entry('999123')
        self.mobile_worker1.get_or_create_phone_entry('999124')
        self.mobile_worker1.get_or_create_phone_entry('999125')
        self.mobile_worker2.get_or_create_phone_entry('999126')
        self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 4)
        self.mobile_worker1.delete_phone_entry('999124')
        self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 3)
        entries = self.mobile_worker1.get_phone_entries()
        self.assertEqual(set(entries.keys()), set(['999123', '999125']))
        entries = self.mobile_worker2.get_phone_entries()
        self.assertEqual(set(entries.keys()), set(['999126']))

    def testUserSyncNoChange(self):
        """Re-saving a user with an unchanged number keeps the existing
        entry (same pk) and its two-way/verified flags."""
        before = self.mobile_worker1.get_or_create_phone_entry('999123')
        before.set_two_way()
        before.set_verified()
        before.save()
        self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 1)
        self.mobile_worker1.phone_numbers = ['999123']
        self.mobile_worker1.save()
        self.assertEqual(PhoneNumber.by_domain(self.domain).count(), 1)
        after = self.mobile_worker1.get_phone_entries()['999123']
        self.assertEqual(before.pk, after.pk)
        self.assertTrue(after.is_two_way)
        self.assertTrue(after.verified)
        self.assertFalse(after.pending_verification)
| corehq/apps/sms/tests/test_phone_numbers.py | 34,381 | A test to make sure that the cache clearing is working as expected.
This test gets run twice using different values for refresh_each_time.
This makes sure that the mechanism used for clearing the cache works
whether you're updating a document you just saved or getting a document
fresh from the database and updating it.
Update Phone Number Update Owner Id If phone entry is ahead of the case in terms of contact_last_modified, no update should happen Delete and clear cache test cache clear on save test cache clear on delete test cache clear on save test promotion to two-way test cache clear on delete test update test cache clear Raises no exception Raises PhoneNumberInUseException | 688 | en | 0.85646 |
from hash_map_base_class import *
class ProbeHashMap(HashMapBase):
    """Hash map implemented with linear probing for collision resolution."""

    _AVAIL = object()  # sentinel marking slots vacated by a deletion

    def _is_available(self, j):
        """Return True if the index j is available in the table."""
        slot = self._table[j]
        return slot is None or slot is ProbeHashMap._AVAIL

    def _find_slot(self, j, k):
        """Search for key k in bucket at index j.

        Return a (success, index) pair: on a match, success is True and
        index is the key's location; otherwise success is False and index
        is the first slot available for an insertion.
        """
        first_avail = None
        table_size = len(self._table)
        while True:
            if self._is_available(j):
                if first_avail is None:
                    first_avail = j  # remember earliest reusable slot
                if self._table[j] is None:
                    # A never-used slot terminates the probe sequence.
                    return (False, first_avail)
            elif k == self._table[j]._key:
                return (True, j)  # key located
            j = (j + 1) % table_size  # advance cyclically

    def _bucket_getitem(self, j, k):
        found, s = self._find_slot(j, k)
        if found:
            return self._table[s]._value
        raise KeyError("Key Error: " + repr(k))  # key absent

    def _bucket_setitem(self, j, k, v):
        found, s = self._find_slot(j, k)
        if found:
            self._table[s]._value = v  # replace value for existing key
        else:
            self._table[s] = self._Item(k, v)  # new entry
            self._n += 1

    def _bucket_delitem(self, j, k):
        found, s = self._find_slot(j, k)
        if not found:
            raise KeyError("Key Error: " + repr(k))  # key absent
        self._table[s] = ProbeHashMap._AVAIL  # leave deletion marker

    def __iter__(self):
        # Scan every slot, yielding keys of occupied entries only.
        for j in range(len(self._table)):
            if not self._is_available(j):
                yield self._table[j]._key
| CHAPTER 10 (maps, hash tables and skip lists)/probe_hash_map_class.py | 2,186 | Hash map implemented with linear probing for collision resolution.
Search for key k in bucket at index j.
Return (success, index) tuple, described as follows:
If match was found, success is True and index denotes its location.
If no match found, success is False and index denotes first available slot.
Return True if the index j is available in the table.
sentinel marks locations of previous deletions mark this as first avail search has failed found a match keep looking (cyclically) no match found insert new item size has increased overwrite existing no match found mark as vacated scan entire table
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: re-declares Image's parent link to File
    # with an explicit on_delete=models.CASCADE (required by Django >= 2.0);
    # no database schema change beyond the field options.

    dependencies = [
        ('filer', '0009_auto_20171220_1635'),
    ]

    operations = [
        migrations.AlterField(
            model_name='image',
            name='file_ptr',
            field=models.OneToOneField(primary_key=True, serialize=False, related_name='filer_image_file', parent_link=True, to='filer.File', on_delete=models.CASCADE),
        ),
    ]
| filer/migrations/0010_auto_20180414_2058.py | 519 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
"""Tests for solver module
"""
# from mathgrid import solver
from mathgrid import solver
def test_calculator_01():
    """calculator evaluates strings starting with '='; others echo back."""
    assert solver.calculator('=((1+3)*2)/(6-4)') == 4  # '=' prefix triggers evaluation
    assert solver.calculator('((1+3)*2)/(6-4)') == '((1+3)*2)/(6-4)'  # no '=': returned verbatim
    assert solver.calculator('=hola') == 'hola'  # non-evaluable input falls through unchanged
| tests/test_solver.py | 289 | Tests for solver module
from mathgrid import solver | 53 | en | 0.744333 |
"""
Tests for various datasette helper functions.
"""
from datasette.app import Datasette
from datasette import utils
from datasette.utils.asgi import Request
from datasette.utils.sqlite import sqlite3
import json
import os
import pathlib
import pytest
import tempfile
from unittest.mock import patch
@pytest.mark.parametrize(
    "path,expected",
    [
        ("foo", ["foo"]),
        ("foo,bar", ["foo", "bar"]),
        ("123,433,112", ["123", "433", "112"]),
        ("123~2C433,112", ["123,433", "112"]),
        ("123~2F433~2F112", ["123/433/112"]),
    ],
)
def test_urlsafe_components(path, expected):
    """urlsafe_components splits on ',' and decodes ~2C/~2F tilde escapes."""
    assert expected == utils.urlsafe_components(path)


@pytest.mark.parametrize(
    "path,added_args,expected",
    [
        ("/foo", {"bar": 1}, "/foo?bar=1"),
        ("/foo?bar=1", {"baz": 2}, "/foo?bar=1&baz=2"),
        ("/foo?bar=1&bar=2", {"baz": 3}, "/foo?bar=1&bar=2&baz=3"),
        ("/foo?bar=1", {"bar": None}, "/foo"),
        # Test order is preserved
        (
            "/?_facet=prim_state&_facet=area_name",
            (("prim_state", "GA"),),
            "/?_facet=prim_state&_facet=area_name&prim_state=GA",
        ),
        (
            "/?_facet=state&_facet=city&state=MI",
            (("city", "Detroit"),),
            "/?_facet=state&_facet=city&state=MI&city=Detroit",
        ),
        (
            "/?_facet=state&_facet=city",
            (("_facet", "planet_int"),),
            "/?_facet=state&_facet=city&_facet=planet_int",
        ),
    ],
)
def test_path_with_added_args(path, added_args, expected):
    """path_with_added_args appends query args; a None value removes one."""
    request = Request.fake(path)
    actual = utils.path_with_added_args(request, added_args)
    assert expected == actual
@pytest.mark.parametrize(
    "path,args,expected",
    [
        ("/foo?bar=1", {"bar"}, "/foo"),
        ("/foo?bar=1&baz=2", {"bar"}, "/foo?baz=2"),
        ("/foo?bar=1&bar=2&bar=3", {"bar": "2"}, "/foo?bar=1&bar=3"),
    ],
)
def test_path_with_removed_args(path, args, expected):
    """path_with_removed_args drops args by name (set) or name/value (dict),
    both from the request's own path and from an explicit path= argument."""
    request = Request.fake(path)
    actual = utils.path_with_removed_args(request, args)
    assert expected == actual
    # Run the test again but this time use the path= argument
    request = Request.fake("/")
    actual = utils.path_with_removed_args(request, args, path=path)
    assert expected == actual


@pytest.mark.parametrize(
    "path,args,expected",
    [
        ("/foo?bar=1", {"bar": 2}, "/foo?bar=2"),
        ("/foo?bar=1&baz=2", {"bar": None}, "/foo?baz=2"),
    ],
)
def test_path_with_replaced_args(path, args, expected):
    """path_with_replaced_args overwrites an arg's value; None removes it."""
    request = Request.fake(path)
    actual = utils.path_with_replaced_args(request, args)
    assert expected == actual
@pytest.mark.parametrize(
    "row,pks,expected_path",
    [
        ({"A": "foo", "B": "bar"}, ["A", "B"], "foo,bar"),
        ({"A": "f,o", "B": "bar"}, ["A", "B"], "f~2Co,bar"),
        ({"A": 123}, ["A"], "123"),
        (
            utils.CustomRow(
                ["searchable_id", "tag"],
                [
                    ("searchable_id", {"value": 1, "label": "1"}),
                    ("tag", {"value": "feline", "label": "feline"}),
                ],
            ),
            ["searchable_id", "tag"],
            "1,feline",
        ),
    ],
)
def test_path_from_row_pks(row, pks, expected_path):
    """path_from_row_pks joins pk values with ',', tilde-escaping commas,
    and unwraps CustomRow value dicts."""
    actual_path = utils.path_from_row_pks(row, pks, False)
    assert expected_path == actual_path


@pytest.mark.parametrize(
    "obj,expected",
    [
        (
            {
                "Description": "Soft drinks",
                "Picture": b"\x15\x1c\x02\xc7\xad\x05\xfe",
                "CategoryID": 1,
            },
            """
    {"CategoryID": 1, "Description": "Soft drinks", "Picture": {"$base64": true, "encoded": "FRwCx60F/g=="}}
    """.strip(),
        )
    ],
)
def test_custom_json_encoder(obj, expected):
    """CustomJSONEncoder serializes bytes as a {"$base64": true, ...} dict."""
    actual = json.dumps(obj, cls=utils.CustomJSONEncoder, sort_keys=True)
    assert expected == actual
@pytest.mark.parametrize(
    "bad_sql",
    [
        "update blah;",
        "-- sql comment to skip\nupdate blah;",
        "update blah set some_column='# Hello there\n\n* This is a list\n* of items\n--\n[And a link](https://github.com/simonw/datasette-render-markdown).'\nas demo_markdown",
        "PRAGMA case_sensitive_like = true",
        "SELECT * FROM pragma_not_on_allow_list('idx52')",
    ],
)
def test_validate_sql_select_bad(bad_sql):
    """Writes, PRAGMA statements and non-allow-listed pragma functions
    must raise InvalidSql."""
    with pytest.raises(utils.InvalidSql):
        utils.validate_sql_select(bad_sql)


@pytest.mark.parametrize(
    "good_sql",
    [
        "select count(*) from airports",
        "select foo from bar",
        "--sql comment to skip\nselect foo from bar",
        "select '# Hello there\n\n* This is a list\n* of items\n--\n[And a link](https://github.com/simonw/datasette-render-markdown).'\nas demo_markdown",
        "select 1 + 1",
        "explain select 1 + 1",
        "explain\nselect 1 + 1",
        "explain query plan select 1 + 1",
        "explain query plan\nselect 1 + 1",
        "SELECT\nblah FROM foo",
        "WITH RECURSIVE cnt(x) AS (SELECT 1 UNION ALL SELECT x+1 FROM cnt LIMIT 10) SELECT x FROM cnt;",
        "explain WITH RECURSIVE cnt(x) AS (SELECT 1 UNION ALL SELECT x+1 FROM cnt LIMIT 10) SELECT x FROM cnt;",
        "explain query plan WITH RECURSIVE cnt(x) AS (SELECT 1 UNION ALL SELECT x+1 FROM cnt LIMIT 10) SELECT x FROM cnt;",
        "SELECT * FROM pragma_index_info('idx52')",
        "select * from pragma_table_xinfo('table')",
    ],
)
def test_validate_sql_select_good(good_sql):
    """SELECT, EXPLAIN, WITH and allow-listed pragma functions pass."""
    utils.validate_sql_select(good_sql)
@pytest.mark.parametrize("open_quote,close_quote", [('"', '"'), ("[", "]")])
def test_detect_fts(open_quote, close_quote):
    """detect_fts finds the FTS virtual table whose content= points at the
    given table, with either quoting style, ignoring views and rtree tables."""
    sql = """
    CREATE TABLE "Dumb_Table" (
    "TreeID" INTEGER,
    "qSpecies" TEXT
    );
    CREATE TABLE "Street_Tree_List" (
    "TreeID" INTEGER,
    "qSpecies" TEXT,
    "qAddress" TEXT,
    "SiteOrder" INTEGER,
    "qSiteInfo" TEXT,
    "PlantType" TEXT,
    "qCaretaker" TEXT
    );
    CREATE VIEW Test_View AS SELECT * FROM Dumb_Table;
    CREATE VIRTUAL TABLE {open}Street_Tree_List_fts{close} USING FTS4 ("qAddress", "qCaretaker", "qSpecies", content={open}Street_Tree_List{close});
    CREATE VIRTUAL TABLE r USING rtree(a, b, c);
    """.format(
        open=open_quote, close=close_quote
    )
    conn = utils.sqlite3.connect(":memory:")
    conn.executescript(sql)
    assert None is utils.detect_fts(conn, "Dumb_Table")
    assert None is utils.detect_fts(conn, "Test_View")
    assert None is utils.detect_fts(conn, "r")
    assert "Street_Tree_List_fts" == utils.detect_fts(conn, "Street_Tree_List")


@pytest.mark.parametrize("table", ("regular", "has'single quote"))
def test_detect_fts_different_table_names(table):
    """detect_fts copes with table names needing quoting (single quotes)."""
    sql = """
    CREATE TABLE [{table}] (
    "TreeID" INTEGER,
    "qSpecies" TEXT
    );
    CREATE VIRTUAL TABLE [{table}_fts] USING FTS4 ("qSpecies", content="{table}");
    """.format(
        table=table
    )
    conn = utils.sqlite3.connect(":memory:")
    conn.executescript(sql)
    assert "{table}_fts".format(table=table) == utils.detect_fts(conn, table)
@pytest.mark.parametrize(
    "url,expected",
    [
        ("http://www.google.com/", True),
        ("https://example.com/", True),
        ("www.google.com", False),
        ("http://www.google.com/ is a search engine", False),
    ],
)
def test_is_url(url, expected):
    """is_url accepts full http(s) URLs only — no bare hosts, no trailing text."""
    assert expected == utils.is_url(url)


@pytest.mark.parametrize(
    "s,expected",
    [
        ("simple", "simple"),
        ("MixedCase", "MixedCase"),
        ("-no-leading-hyphens", "no-leading-hyphens-65bea6"),
        ("_no-leading-underscores", "no-leading-underscores-b921bc"),
        ("no spaces", "no-spaces-7088d7"),
        ("-", "336d5e"),
        ("no $ characters", "no--characters-59e024"),
    ],
)
def test_to_css_class(s, expected):
    """to_css_class sanitizes invalid characters and appends a hash suffix
    whenever the input had to be modified."""
    assert expected == utils.to_css_class(s)
def test_temporary_docker_directory_uses_hard_link():
    """temporary_docker_directory links included files into the build dir
    via os.link (st_nlink == 2) rather than copying."""
    with tempfile.TemporaryDirectory() as td:
        os.chdir(td)
        with open("hello", "w") as fp:
            fp.write("world")
        # Default usage of this should use a hard link for included files
        with utils.temporary_docker_directory(
            files=["hello"],
            name="t",
            metadata=None,
            extra_options=None,
            branch=None,
            template_dir=None,
            plugins_dir=None,
            static=[],
            install=[],
            spatialite=False,
            version_note=None,
            secret="secret",
        ) as temp_docker:
            hello = os.path.join(temp_docker, "hello")
            with open(hello) as fp:
                assert "world" == fp.read()
            # It should be a hard link
            assert 2 == os.stat(hello).st_nlink


@patch("os.link")
def test_temporary_docker_directory_uses_copy_if_hard_link_fails(mock_link):
    # Copy instead if os.link raises OSError (normally due to different device)
    mock_link.side_effect = OSError
    with tempfile.TemporaryDirectory() as td:
        os.chdir(td)
        with open("hello", "w") as fp:
            fp.write("world")
        # With os.link failing, the file must be copied instead
        with utils.temporary_docker_directory(
            files=["hello"],
            name="t",
            metadata=None,
            extra_options=None,
            branch=None,
            template_dir=None,
            plugins_dir=None,
            static=[],
            install=[],
            spatialite=False,
            version_note=None,
            secret=None,
        ) as temp_docker:
            hello = os.path.join(temp_docker, "hello")
            with open(hello) as fp:
                assert "world" == fp.read()
            # It should be a copy, not a hard link
            assert 1 == os.stat(hello).st_nlink
def test_temporary_docker_directory_quotes_args():
    """Shell metacharacters in extra_options / version_note / secret are
    single-quoted in the generated Dockerfile (no $-expansion)."""
    with tempfile.TemporaryDirectory() as td:
        os.chdir(td)
        with open("hello", "w") as fp:
            fp.write("world")
        with utils.temporary_docker_directory(
            files=["hello"],
            name="t",
            metadata=None,
            extra_options="--$HOME",
            branch=None,
            template_dir=None,
            plugins_dir=None,
            static=[],
            install=[],
            spatialite=False,
            version_note="$PWD",
            secret="secret",
        ) as temp_docker:
            df = os.path.join(temp_docker, "Dockerfile")
            with open(df) as fp:
                df_contents = fp.read()
            assert "'$PWD'" in df_contents
            assert "'--$HOME'" in df_contents
            assert "ENV DATASETTE_SECRET 'secret'" in df_contents
def test_compound_keys_after_sql():
    """compound_keys_after_sql builds the lexicographic 'after this key'
    WHERE clause, growing one OR branch per additional key column."""
    assert "((a > :p0))" == utils.compound_keys_after_sql(["a"])
    assert """
((a > :p0)
  or
(a = :p0 and b > :p1))
""".strip() == utils.compound_keys_after_sql(
        ["a", "b"]
    )
    assert """
((a > :p0)
  or
(a = :p0 and b > :p1)
  or
(a = :p0 and b = :p1 and c > :p2))
""".strip() == utils.compound_keys_after_sql(
        ["a", "b", "c"]
    )
async def table_exists(table):
    """Fake async table-existence check for test_resolve_table_and_format.

    Only the table literally named "exists.csv" is reported as existing.
    """
    result = table == "exists.csv"
    return result
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "table_and_format,expected_table,expected_format",
    [
        ("blah", "blah", None),
        ("blah.csv", "blah", "csv"),
        ("blah.json", "blah", "json"),
        ("blah.baz", "blah.baz", None),
        ("exists.csv", "exists.csv", None),
    ],
)
async def test_resolve_table_and_format(
    table_and_format, expected_table, expected_format
):
    """A recognized extension splits off as the format — unless a table
    with the full dotted name exists (per the table_exists stub above)."""
    actual_table, actual_format = await utils.resolve_table_and_format(
        table_and_format, table_exists, ["json"]
    )
    assert expected_table == actual_table
    assert expected_format == actual_format
def test_table_columns():
    """table_columns returns column names in declaration order."""
    conn = sqlite3.connect(":memory:")
    conn.executescript(
        """
    create table places (id integer primary key, name text, bob integer)
    """
    )
    assert ["id", "name", "bob"] == utils.table_columns(conn, "places")
@pytest.mark.parametrize(
    "path,format,extra_qs,expected",
    [
        ("/foo?sql=select+1", "csv", {}, "/foo.csv?sql=select+1"),
        ("/foo?sql=select+1", "json", {}, "/foo.json?sql=select+1"),
        ("/foo/bar", "json", {}, "/foo/bar.json"),
        ("/foo/bar", "csv", {}, "/foo/bar.csv"),
        ("/foo/bar", "csv", {"_dl": 1}, "/foo/bar.csv?_dl=1"),
        (
            "/sf-trees/Street_Tree_List?_search=cherry&_size=1000",
            "csv",
            {"_dl": 1},
            "/sf-trees/Street_Tree_List.csv?_search=cherry&_size=1000&_dl=1",
        ),
    ],
)
def test_path_with_format(path, format, extra_qs, expected):
    """path_with_format inserts '.format' before the query string and
    appends any extra_qs parameters after the existing ones."""
    request = Request.fake(path)
    actual = utils.path_with_format(request=request, format=format, extra_qs=extra_qs)
    assert expected == actual
@pytest.mark.parametrize(
    # Renamed from "bytes" so the parameter no longer shadows the builtin
    # `bytes` type inside the test body.
    "num_bytes,expected",
    [
        (120, "120 bytes"),
        (1024, "1.0 KB"),
        (1024 * 1024, "1.0 MB"),
        (1024 * 1024 * 1024, "1.0 GB"),
        (1024 * 1024 * 1024 * 1.3, "1.3 GB"),
        (1024 * 1024 * 1024 * 1024, "1.0 TB"),
    ],
)
def test_format_bytes(num_bytes, expected):
    """format_bytes renders byte counts using 1024-based units (KB..TB)."""
    assert expected == utils.format_bytes(num_bytes)
@pytest.mark.parametrize(
    "query,expected",
    [
        ("dog", '"dog"'),
        ("cat,", '"cat,"'),
        ("cat dog", '"cat" "dog"'),
        # If a phrase is already double quoted, leave it so
        ('"cat dog"', '"cat dog"'),
        ('"cat dog" fish', '"cat dog" "fish"'),
        # Sensibly handle unbalanced double quotes
        ('cat"', '"cat"'),
        ('"cat dog" "fish', '"cat dog" "fish"'),
    ],
)
def test_escape_fts(query, expected):
    """escape_fts wraps each term in double quotes, preserving quoted phrases."""
    assert expected == utils.escape_fts(query)


@pytest.mark.parametrize(
    "input,expected",
    [
        ("dog", "dog"),
        ('dateutil_parse("1/2/2020")', r"dateutil_parse(\0000221/2/2020\000022)"),
        ("this\r\nand\r\nthat", r"this\00000Aand\00000Athat"),
    ],
)
def test_escape_css_string(input, expected):
    """escape_css_string replaces quotes and newlines with CSS \\6-digit escapes."""
    assert expected == utils.escape_css_string(input)
def test_check_connection_spatialite_raises():
    """A SpatiaLite database without the extension loaded must raise
    SpatialiteConnectionProblem."""
    path = str(pathlib.Path(__file__).parent / "spatialite.db")
    conn = sqlite3.connect(path)
    with pytest.raises(utils.SpatialiteConnectionProblem):
        utils.check_connection(conn)


def test_check_connection_passes():
    """A plain in-memory database passes check_connection without error."""
    conn = sqlite3.connect(":memory:")
    utils.check_connection(conn)
def test_call_with_supported_arguments():
    """call_with_supported_arguments drops extra kwargs the callable does
    not accept, but missing required arguments still raise TypeError."""
    def foo(a, b):
        return f"{a}+{b}"

    assert "1+2" == utils.call_with_supported_arguments(foo, a=1, b=2)
    # 'c' is silently discarded because foo does not take it.
    assert "1+2" == utils.call_with_supported_arguments(foo, a=1, b=2, c=3)
    with pytest.raises(TypeError):
        utils.call_with_supported_arguments(foo, a=1)
@pytest.mark.parametrize(
    "data,should_raise",
    [
        ([["foo", "bar"], ["foo", "baz"]], False),
        ([("foo", "bar"), ("foo", "baz")], False),
        ((["foo", "bar"], ["foo", "baz"]), False),
        ([["foo", "bar"], ["foo", "baz", "bax"]], True),
        ({"foo": ["bar", "baz"]}, False),
        ({"foo": ("bar", "baz")}, False),
        ({"foo": "bar"}, True),
    ],
)
def test_multi_params(data, should_raise):
    """MultiParams accepts key/value pair sequences or dict-of-sequences;
    malformed shapes raise AssertionError. Indexing returns the first
    value, getlist() all of them."""
    if should_raise:
        with pytest.raises(AssertionError):
            utils.MultiParams(data)
        return
    p1 = utils.MultiParams(data)
    assert "bar" == p1["foo"]
    assert ["bar", "baz"] == list(p1.getlist("foo"))
@pytest.mark.parametrize(
    "actor,allow,expected",
    [
        # Default is to allow:
        (None, None, True),
        # {} means deny-all:
        (None, {}, False),
        ({"id": "root"}, {}, False),
        # true means allow-all
        ({"id": "root"}, True, True),
        (None, True, True),
        # false means deny-all
        ({"id": "root"}, False, False),
        (None, False, False),
        # Special case for "unauthenticated": true
        (None, {"unauthenticated": True}, True),
        (None, {"unauthenticated": False}, False),
        # Match on just one property:
        (None, {"id": "root"}, False),
        ({"id": "root"}, None, True),
        ({"id": "simon", "staff": True}, {"staff": True}, True),
        ({"id": "simon", "staff": False}, {"staff": True}, False),
        # Special "*" value for any key:
        ({"id": "root"}, {"id": "*"}, True),
        ({}, {"id": "*"}, False),
        ({"name": "root"}, {"id": "*"}, False),
        # Supports single strings or list of values:
        ({"id": "root"}, {"id": "bob"}, False),
        ({"id": "root"}, {"id": ["bob"]}, False),
        ({"id": "root"}, {"id": "root"}, True),
        ({"id": "root"}, {"id": ["root"]}, True),
        # Any matching role will work:
        ({"id": "garry", "roles": ["staff", "dev"]}, {"roles": ["staff"]}, True),
        ({"id": "garry", "roles": ["staff", "dev"]}, {"roles": ["dev"]}, True),
        ({"id": "garry", "roles": ["staff", "dev"]}, {"roles": ["otter"]}, False),
        ({"id": "garry", "roles": ["staff", "dev"]}, {"roles": ["dev", "otter"]}, True),
        ({"id": "garry", "roles": []}, {"roles": ["staff"]}, False),
        ({"id": "garry"}, {"roles": ["staff"]}, False),
        # Any single matching key works:
        ({"id": "root"}, {"bot_id": "my-bot", "id": ["root"]}, True),
    ],
)
def test_actor_matches_allow(actor, allow, expected):
    """Exhaustive cases for the allow-block matching rules documented above."""
    assert expected == utils.actor_matches_allow(actor, allow)
@pytest.mark.parametrize(
    "config,expected",
    [
        ({"foo": "bar"}, {"foo": "bar"}),
        ({"$env": "FOO"}, "x"),
        ({"k": {"$env": "FOO"}}, {"k": "x"}),
        ([{"k": {"$env": "FOO"}}, {"z": {"$env": "FOO"}}], [{"k": "x"}, {"z": "x"}]),
        ({"k": [{"in_a_list": {"$env": "FOO"}}]}, {"k": [{"in_a_list": "x"}]}),
    ],
)
def test_resolve_env_secrets(config, expected):
    """resolve_env_secrets substitutes {"$env": NAME} markers with env
    values, recursing through nested dicts and lists."""
    assert expected == utils.resolve_env_secrets(config, {"FOO": "x"})
@pytest.mark.parametrize(
    "actor,expected",
    [
        ({"id": "blah"}, "blah"),
        ({"id": "blah", "login": "l"}, "l"),
        ({"id": "blah", "login": "l"}, "l"),
        ({"id": "blah", "login": "l", "username": "u"}, "u"),
        ({"login": "l", "name": "n"}, "n"),
        (
            {"id": "blah", "login": "l", "username": "u", "name": "n", "display": "d"},
            "d",
        ),
        ({"weird": "shape"}, "{'weird': 'shape'}"),
    ],
)
def test_display_actor(actor, expected):
    """display_actor prefers display > name > username > login > id, and
    falls back to repr() for unknown shapes."""
    assert expected == utils.display_actor(actor)
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "dbs,expected_path",
    [
        # One DB with one table: jump straight to that table page.
        (["one_table"], "/one/one"),
        # One DB with several tables: land on the database page.
        (["two_tables"], "/two"),
        # Several DBs: land on the index.
        (["one_table", "two_tables"], "/"),
    ],
)
async def test_initial_path_for_datasette(tmp_path_factory, dbs, expected_path):
    db_dir = tmp_path_factory.mktemp("dbs")
    one_table = str(db_dir / "one.db")
    sqlite3.connect(one_table).execute("create table one (id integer primary key)")
    two_tables = str(db_dir / "two.db")
    sqlite3.connect(two_tables).execute("create table two (id integer primary key)")
    sqlite3.connect(two_tables).execute("create table three (id integer primary key)")
    datasette = Datasette(
        [{"one_table": one_table, "two_tables": two_tables}[db] for db in dbs]
    )
    path = await utils.initial_path_for_datasette(datasette)
    assert path == expected_path
@pytest.mark.parametrize(
    "content,expected",
    (
        ("title: Hello", {"title": "Hello"}),
        ('{"title": "Hello"}', {"title": "Hello"}),
        ("{{ this }} is {{ bad }}", None),
    ),
)
def test_parse_metadata(content, expected):
    """parse_metadata accepts YAML or JSON; unparseable content raises
    BadMetadataError (signalled here by expected=None)."""
    if expected is None:
        with pytest.raises(utils.BadMetadataError):
            utils.parse_metadata(content)
    else:
        assert utils.parse_metadata(content) == expected
@pytest.mark.asyncio
@pytest.mark.parametrize(
"sql,expected",
(
("select 1", []),
("select 1 + :one", ["one"]),
("select 1 + :one + :two", ["one", "two"]),
("select 'bob' || '0:00' || :cat", ["cat"]),
("select this is invalid :one, :two, :three", ["one", "two", "three"]),
),
)
async def test_derive_named_parameters(sql, expected):
ds = Datasette([], memory=True)
db = ds.get_database("_memory")
params = await utils.derive_named_parameters(db, sql)
assert params == expected
@pytest.mark.parametrize(
"original,expected",
(
("abc", "abc"),
("/foo/bar", "~2Ffoo~2Fbar"),
("/-/bar", "~2F-~2Fbar"),
("-/db-/table.csv", "-~2Fdb-~2Ftable~2Ecsv"),
(r"%~-/", "~25~7E-~2F"),
("~25~7E~2D~2F", "~7E25~7E7E~7E2D~7E2F"),
),
)
def test_tilde_encoding(original, expected):
actual = utils.tilde_encode(original)
assert actual == expected
# And test round-trip
assert original == utils.tilde_decode(actual)
| tests/test_utils.py | 20,731 | Tests for various datasette helper functions.
Test order is preserved Run the test again but this time use the path= argument Default usage of this should use symlink It should be a hard link Copy instead if os.link raises OSError (normally due to different device) Default usage of this should use symlink It should be a copy, not a hard link If a phrase is already double quoted, leave it so Sensibly handle unbalanced double quotes Default is to allow: {} means deny-all: true means allow-all false means deny-all Special case for "unauthenticated": true Match on just one property: Special "*" value for any key: Supports single strings or list of values: Any matching role will work: Any single matching key works: And test round-trip | 741 | en | 0.763793 |
import pgzero
import pgzrun
import random
from pgzero.actor import Actor
__all__ = ["pgzrun", "pgzero"]
from pgzero.clock import clock
from pgzero.keyboard import keyboard
from pgzero.loaders import sounds
clouds = [Actor('cloud1', (200, 200)),
Actor('cloud2', (400, 300)),
Actor('cloud3', (600, 200)),
Actor('cloud1', (800, 300))]
obstacles = [Actor('cactus', (random.randint(900, 1000), 495)),
Actor('cactus', (random.randint(1200, 1500), 495)),
Actor('cactus', (random.randint(1500, 2000), 495))]
player = Actor('p3_stand', (100, 484))
# 0 - game not started
# 1 - game just stared
# 2 - finished
game = 0
# frame that is currently running
frame = 0
# player movement speed and direction
jump = 0
# 0 - jump is available
# 1 - jump is forbidden
jump_blocked = 0
cloud_speed = 2
game_time = 0
# cactus movement speed
game_speed = 8
# 0 - game running
# 1 - game blocked
jump_unblocked = 0
def draw():
global game
screen.clear()
screen.fill('#cff4f7')
for i in range((screen.width // 70) + 1):
screen.blit('grass', (i * 70, screen.height - 70))
for cloud in clouds:
cloud.draw()
for obstacle in obstacles:
obstacle.draw()
screen.draw.text(
align_text_time(game_time),
midright=(screen.width - 50, 50),
fontname="roboto_mono_bold",
color="orange",
fontsize=45
)
player.draw()
if game == 0:
screen.draw.text(
"Wcisnij spacje",
center=(screen.width / 2, screen.height / 2),
color="orange",
fontsize=60
)
if game == 2:
screen.draw.text(
"Koniec gry",
center=(screen.width / 2, screen.height / 2),
color="red",
fontsize=60
)
screen.draw.text(
"Wcisnij spacje aby zagrac jeszcze raz",
center=(screen.width / 2, screen.height - 200),
color="red",
fontsize=30
)
def update():
global game
global jump
global jump_blocked
global jump_unblocked
if keyboard.SPACE and jump_unblocked == 0:
if game == 0 or game == 2:
jump_blocked = 1
clock.schedule_unique(unblock_jump, 0.3)
reset()
game = 1
if jump_blocked == 0:
jump = -18
jump_blocked = 1
sounds.jingles_jump.play()
animation()
jump_fall()
move_cloud()
move_obstacle()
check_collision()
# change difficulty level, increase game and clouds speed
def change_difficulty_level():
global game_speed
global cloud_speed
if game_speed < 16:
game_speed += 1
cloud_speed += 1
# reset global variables
def reset():
global frame
global game
global jump
global jump_blocked
global cloud_speed
global game_speed
global game_time
if game == 2:
frame = 0
game = 0
jump = 0
jump_blocked = 1
cloud_speed = 2
game_speed = 8
game_time = 0
player.pos = (100, 484)
clouds[0].pos = (200, 200)
clouds[1].pos = (400, 300)
clouds[2].pos = (600, 200)
clouds[3].pos = (800, 300)
obstacles[0].pos = (random.randint(900, 1000), 495)
obstacles[1].pos = (random.randint(1200, 1500), 495)
obstacles[2].pos = (random.randint(1500, 2000), 495)
clock.unschedule(change_difficulty_level)
# change difficulty level every 20s
clock.schedule_interval(change_difficulty_level, 20)
def unblock_game():
global jump_unblocked
jump_unblocked = 0
# check collision with cactus
def check_collision():
global game
global jump_unblocked
if game == 1:
for i in obstacles:
if player.collidepoint(i.x, i.y):
game = 2
sounds.jingles_end.play()
jump_unblocked = 1
# unblock game in 2 sec
clock.schedule_unique(unblock_game, 2.0)
def move_obstacle():
global game_speed
global game
if game == 1:
for i in range(len(obstacles)):
# decrease x for all obstacles about speed value
obstacles[i].x -= game_speed
# if obstacles is out of screen get random position
if obstacles[i].x + 35 < 0:
obstacles[i].x = random.randint(900, 1500)
# if obstacles have the same position as other or is too close, move it about 400
for j in range(0, len(obstacles)):
if j != i and abs(obstacles[i].x - obstacles[j].x < 300):
obstacles[i].x += 400
# triggered every 0.1s increasing game time about 1s
def measure_time():
global game_time
global game
if game == 0:
game_time = 0
elif game == 1:
game_time +=1
def align_text_time(time):
text = "0" * (5 - len(str(time)))
text += str(time)
return text
def move_cloud():
global cloud_speed
global game
if game == 1:
# move clouds x pos about cloud speed
for cloud in clouds:
cloud.x -= cloud_speed
# if cloud out of screen move it to right side
if cloud.x + 64 < 0:
cloud.x = screen.width + 32
def unblock_jump():
global jump_blocked
jump_blocked = 0
def jump_fall():
global jump
global frame
if jump != 0:
# block animation
frame = 0
player.y += jump
# if player on the ground unblock
if player.y >= 484:
unblock_jump()
jump = 0
# if player jumped start falling
if player.y <= 250:
jump *= (-1)
# player animation
def animation():
global frame
if game == 1:
if frame == 0:
player.image = 'p3_walk01'
if frame == 1:
player.image = 'p3_walk02'
if frame == 2:
player.image = 'p3_walk03'
if frame == 3:
player.image = 'p3_walk04'
if frame == 4:
player.image = 'p3_walk05'
if frame == 5:
player.image = 'p3_walk06'
if frame == 6:
player.image = 'p3_walk07'
if frame == 7:
player.image = 'p3_walk08'
if frame == 8:
player.image = 'p3_walk09'
if frame == 9:
player.image = 'p3_walk10'
if frame == 10:
player.image = 'p3_walk11'
frame += 1
# result is 0 or less than 11
frame %= 11
clock.schedule_interval(measure_time, 0.1)
clock.schedule_interval(change_difficulty_level, 20)
pgzrun.go() | dino/main.py | 6,669 | 0 - game not started 1 - game just stared 2 - finished frame that is currently running player movement speed and direction 0 - jump is available 1 - jump is forbidden cactus movement speed 0 - game running 1 - game blocked change difficulty level, increase game and clouds speed reset global variables change difficulty level every 20s check collision with cactus unblock game in 2 sec decrease x for all obstacles about speed value if obstacles is out of screen get random position if obstacles have the same position as other or is too close, move it about 400 triggered every 0.1s increasing game time about 1s move clouds x pos about cloud speed if cloud out of screen move it to right side block animation if player on the ground unblock if player jumped start falling player animation result is 0 or less than 11 | 818 | en | 0.92168 |
"""
A pure python (slow) implementation of rijndael with a decent interface
To include -
from rijndael import rijndael
To do a key setup -
r = rijndael(key, block_size = 16)
key must be a string of length 16, 24, or 32
blocksize must be 16, 24, or 32. Default is 16
To use -
ciphertext = r.encrypt(plaintext)
plaintext = r.decrypt(ciphertext)
If any strings are of the wrong length a ValueError is thrown
"""
# ported from the Java reference code by Bram Cohen, April 2001
# this code is public domain, unless someone makes
# an intellectual property claim against the reference
# code, in which case it can be made public domain by
# deleting all the comments and renaming all the variables
import copy
import string
shifts = [[[0, 0], [1, 3], [2, 2], [3, 1]],
[[0, 0], [1, 5], [2, 4], [3, 3]],
[[0, 0], [1, 7], [3, 5], [4, 4]]]
# [keysize][block_size]
num_rounds = {16: {16: 10, 24: 12, 32: 14}, 24: {16: 12, 24: 12, 32: 14}, 32: {16: 14, 24: 14, 32: 14}}
A = [[1, 1, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 0, 0, 0, 1]]
# produce log and alog tables, needed for multiplying in the
# field GF(2^m) (generator = 3)
alog = [1]
for i in range(255):
j = (alog[-1] << 1) ^ alog[-1]
if j & 0x100 != 0:
j ^= 0x11B
alog.append(j)
log = [0] * 256
for i in range(1, 255):
log[alog[i]] = i
# multiply two elements of GF(2^m)
def mul(a, b):
if a == 0 or b == 0:
return 0
return alog[(log[a & 0xFF] + log[b & 0xFF]) % 255]
# substitution box based on F^{-1}(x)
box = [[0] * 8 for i in range(256)]
box[1][7] = 1
for i in range(2, 256):
j = alog[255 - log[i]]
for t in range(8):
box[i][t] = (j >> (7 - t)) & 0x01
B = [0, 1, 1, 0, 0, 0, 1, 1]
# affine transform: box[i] <- B + A*box[i]
cox = [[0] * 8 for i in range(256)]
for i in range(256):
for t in range(8):
cox[i][t] = B[t]
for j in range(8):
cox[i][t] ^= A[t][j] * box[i][j]
# S-boxes and inverse S-boxes
S = [0] * 256
Si = [0] * 256
for i in range(256):
S[i] = cox[i][0] << 7
for t in range(1, 8):
S[i] ^= cox[i][t] << (7-t)
Si[S[i] & 0xFF] = i
# T-boxes
G = [[2, 1, 1, 3],
[3, 2, 1, 1],
[1, 3, 2, 1],
[1, 1, 3, 2]]
AA = [[0] * 8 for i in range(4)]
for i in range(4):
for j in range(4):
AA[i][j] = G[i][j]
AA[i][i+4] = 1
for i in range(4):
pivot = AA[i][i]
if pivot == 0:
t = i + 1
while AA[t][i] == 0 and t < 4:
t += 1
assert t != 4, 'G matrix must be invertible'
for j in range(8):
AA[i][j], AA[t][j] = AA[t][j], AA[i][j]
pivot = AA[i][i]
for j in range(8):
if AA[i][j] != 0:
AA[i][j] = alog[(255 + log[AA[i][j] & 0xFF] - log[pivot & 0xFF]) % 255]
for t in range(4):
if i != t:
for j in range(i+1, 8):
AA[t][j] ^= mul(AA[i][j], AA[t][i])
AA[t][i] = 0
iG = [[0] * 4 for i in range(4)]
for i in range(4):
for j in range(4):
iG[i][j] = AA[i][j + 4]
def mul4(a, bs):
if a == 0:
return 0
r = 0
for b in bs:
r <<= 8
if b != 0:
r = r | mul(a, b)
return r
T1 = []
T2 = []
T3 = []
T4 = []
T5 = []
T6 = []
T7 = []
T8 = []
U1 = []
U2 = []
U3 = []
U4 = []
for t in range(256):
s = S[t]
T1.append(mul4(s, G[0]))
T2.append(mul4(s, G[1]))
T3.append(mul4(s, G[2]))
T4.append(mul4(s, G[3]))
s = Si[t]
T5.append(mul4(s, iG[0]))
T6.append(mul4(s, iG[1]))
T7.append(mul4(s, iG[2]))
T8.append(mul4(s, iG[3]))
U1.append(mul4(t, iG[0]))
U2.append(mul4(t, iG[1]))
U3.append(mul4(t, iG[2]))
U4.append(mul4(t, iG[3]))
# round constants
rcon = [1]
r = 1
for t in range(1, 30):
r = mul(2, r)
rcon.append(r)
del A
del AA
del pivot
del B
del G
del box
del log
del alog
del i
del j
del r
del s
del t
del mul
del mul4
del cox
del iG
class rijndael(object):
def __init__(self, key, block_size = 16):
if block_size != 16 and block_size != 24 and block_size != 32:
raise ValueError('Invalid block size: ' + str(block_size))
if len(key) != 16 and len(key) != 24 and len(key) != 32:
raise ValueError('Invalid key size: ' + str(len(key)))
self.block_size = block_size
ROUNDS = num_rounds[len(key)][block_size]
BC = block_size // 4
# encryption round keys
Ke = [[0] * BC for i in range(ROUNDS + 1)]
# decryption round keys
Kd = [[0] * BC for i in range(ROUNDS + 1)]
ROUND_KEY_COUNT = (ROUNDS + 1) * BC
KC = len(key) // 4
# copy user material bytes into temporary ints
tk = []
for i in range(0, KC):
tk.append((ord(key[i * 4]) << 24) | (ord(key[i * 4 + 1]) << 16) |
(ord(key[i * 4 + 2]) << 8) | ord(key[i * 4 + 3]))
# copy values into round key arrays
t = 0
j = 0
while j < KC and t < ROUND_KEY_COUNT:
Ke[t // BC][t % BC] = tk[j]
Kd[ROUNDS - (t // BC)][t % BC] = tk[j]
j += 1
t += 1
tt = 0
rconpointer = 0
while t < ROUND_KEY_COUNT:
# extrapolate using phi (the round key evolution function)
tt = tk[KC - 1]
tk[0] ^= (S[(tt >> 16) & 0xFF] & 0xFF) << 24 ^ \
(S[(tt >> 8) & 0xFF] & 0xFF) << 16 ^ \
(S[ tt & 0xFF] & 0xFF) << 8 ^ \
(S[(tt >> 24) & 0xFF] & 0xFF) ^ \
(rcon[rconpointer] & 0xFF) << 24
rconpointer += 1
if KC != 8:
for i in range(1, KC):
tk[i] ^= tk[i-1]
else:
for i in range(1, KC // 2):
tk[i] ^= tk[i-1]
tt = tk[KC // 2 - 1]
tk[KC // 2] ^= (S[ tt & 0xFF] & 0xFF) ^ \
(S[(tt >> 8) & 0xFF] & 0xFF) << 8 ^ \
(S[(tt >> 16) & 0xFF] & 0xFF) << 16 ^ \
(S[(tt >> 24) & 0xFF] & 0xFF) << 24
for i in range(KC // 2 + 1, KC):
tk[i] ^= tk[i-1]
# copy values into round key arrays
j = 0
while j < KC and t < ROUND_KEY_COUNT:
Ke[t // BC][t % BC] = tk[j]
Kd[ROUNDS - (t // BC)][t % BC] = tk[j]
j += 1
t += 1
# inverse MixColumn where needed
for r in range(1, ROUNDS):
for j in range(BC):
tt = Kd[r][j]
Kd[r][j] = U1[(tt >> 24) & 0xFF] ^ \
U2[(tt >> 16) & 0xFF] ^ \
U3[(tt >> 8) & 0xFF] ^ \
U4[ tt & 0xFF]
self.Ke = Ke
self.Kd = Kd
def encrypt(self, plaintext):
if len(plaintext) != self.block_size:
raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(plaintext)))
Ke = self.Ke
BC = self.block_size // 4
ROUNDS = len(Ke) - 1
if BC == 4:
SC = 0
elif BC == 6:
SC = 1
else:
SC = 2
s1 = shifts[SC][1][0]
s2 = shifts[SC][2][0]
s3 = shifts[SC][3][0]
a = [0] * BC
# temporary work array
t = []
# plaintext to ints + key
for i in range(BC):
t.append((ord(plaintext[i * 4 ]) << 24 |
ord(plaintext[i * 4 + 1]) << 16 |
ord(plaintext[i * 4 + 2]) << 8 |
ord(plaintext[i * 4 + 3]) ) ^ Ke[0][i])
# apply round transforms
for r in range(1, ROUNDS):
for i in range(BC):
a[i] = (T1[(t[ i ] >> 24) & 0xFF] ^
T2[(t[(i + s1) % BC] >> 16) & 0xFF] ^
T3[(t[(i + s2) % BC] >> 8) & 0xFF] ^
T4[ t[(i + s3) % BC] & 0xFF] ) ^ Ke[r][i]
t = copy.copy(a)
# last round is special
result = []
for i in range(BC):
tt = Ke[ROUNDS][i]
result.append((S[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
result.append((S[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
result.append((S[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
result.append((S[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF)
return ''.join(map(chr, result))
def decrypt(self, ciphertext):
if len(ciphertext) != self.block_size:
raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(ciphertext)))
Kd = self.Kd
BC = self.block_size // 4
ROUNDS = len(Kd) - 1
if BC == 4:
SC = 0
elif BC == 6:
SC = 1
else:
SC = 2
s1 = shifts[SC][1][1]
s2 = shifts[SC][2][1]
s3 = shifts[SC][3][1]
a = [0] * BC
# temporary work array
t = [0] * BC
# ciphertext to ints + key
for i in range(BC):
t[i] = (ord(ciphertext[i * 4 ]) << 24 |
ord(ciphertext[i * 4 + 1]) << 16 |
ord(ciphertext[i * 4 + 2]) << 8 |
ord(ciphertext[i * 4 + 3]) ) ^ Kd[0][i]
# apply round transforms
for r in range(1, ROUNDS):
for i in range(BC):
a[i] = (T5[(t[ i ] >> 24) & 0xFF] ^
T6[(t[(i + s1) % BC] >> 16) & 0xFF] ^
T7[(t[(i + s2) % BC] >> 8) & 0xFF] ^
T8[ t[(i + s3) % BC] & 0xFF] ) ^ Kd[r][i]
t = copy.copy(a)
# last round is special
result = []
for i in range(BC):
tt = Kd[ROUNDS][i]
result.append((Si[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
result.append((Si[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
result.append((Si[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
result.append((Si[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF)
return ''.join(map(chr, result))
def encrypt(key, block):
return rijndael(key, len(block)).encrypt(block)
def decrypt(key, block):
return rijndael(key, len(block)).decrypt(block)
def t(kl, bl):
b = 'b' * bl
r = rijndael('a' * kl, bl)
assert r.decrypt(r.encrypt(b)) == b
def multiple_calls(N):
for _ in xrange(N):
t(16, 24)
t(16, 32)
t(24, 16)
t(24, 24)
t(24, 32)
t(32, 16)
t(32, 24)
t(32, 32)
if __name__ == '__main__':
n_repeats = 50
multiple_calls(n_repeats)
| benchmarks/crypto.py | 11,029 | A pure python (slow) implementation of rijndael with a decent interface
To include -
from rijndael import rijndael
To do a key setup -
r = rijndael(key, block_size = 16)
key must be a string of length 16, 24, or 32
blocksize must be 16, 24, or 32. Default is 16
To use -
ciphertext = r.encrypt(plaintext)
plaintext = r.decrypt(ciphertext)
If any strings are of the wrong length a ValueError is thrown
ported from the Java reference code by Bram Cohen, April 2001 this code is public domain, unless someone makes an intellectual property claim against the reference code, in which case it can be made public domain by deleting all the comments and renaming all the variables [keysize][block_size] produce log and alog tables, needed for multiplying in the field GF(2^m) (generator = 3) multiply two elements of GF(2^m) substitution box based on F^{-1}(x) affine transform: box[i] <- B + A*box[i] S-boxes and inverse S-boxes T-boxes round constants encryption round keys decryption round keys copy user material bytes into temporary ints copy values into round key arrays extrapolate using phi (the round key evolution function) copy values into round key arrays inverse MixColumn where needed temporary work array plaintext to ints + key apply round transforms last round is special temporary work array ciphertext to ints + key apply round transforms last round is special | 1,386 | en | 0.776632 |
# (c) 2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit tests for Ansible module: na_ontap_rest_cli'''
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import pytest
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
from ansible_collections.netapp.ontap.tests.unit.compat import unittest
from ansible_collections.netapp.ontap.tests.unit.compat.mock import patch
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.ontap.plugins.modules.na_ontap_rest_cli \
import NetAppONTAPCommandREST as rest_cli_module, main # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
# REST API canned responses when mocking send_request
SRR = {
# common responses
'is_rest': (200, {}, None),
'empty_good': (200, {}, None),
'end_of_sequence': (500, None, "Ooops, the UT needs one more SRR response"),
'generic_error': (400, None, "Expected error"),
# module specific response
'allow': (200, {'Allow': ['GET', 'WHATEVER']}, None)
}
def set_module_args(args):
"""prepare arguments so that they will be picked up during module creation"""
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
class AnsibleExitJson(Exception):
"""Exception class to be raised by module.exit_json and caught by the test case"""
class AnsibleFailJson(Exception):
"""Exception class to be raised by module.fail_json and caught by the test case"""
def exit_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over exit_json; package return data into an exception"""
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over fail_json; package return data into an exception"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
class TestMyModule(unittest.TestCase):
''' Unit tests for na_ontap_job_schedule '''
def setUp(self):
self.mock_module_helper = patch.multiple(basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json)
self.mock_module_helper.start()
self.addCleanup(self.mock_module_helper.stop)
def mock_args(self):
return {
'hostname': 'test',
'username': 'test_user',
'password': 'test_pass!',
'https': False,
'command': 'volume',
'verb': 'GET',
'params': {'fields': 'size,percent_used'}
}
def get_cli_mock_object(self):
# For rest, mocking is achieved through side_effect
return rest_cli_module()
def test_module_fail_when_required_args_missing(self):
''' required arguments are reported as errors '''
with pytest.raises(AnsibleFailJson) as exc:
set_module_args({})
rest_cli_module()
print('Info: %s' % exc.value.args[0]['msg'])
@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
def test_rest_cli(self, mock_request):
data = dict(self.mock_args())
set_module_args(data)
mock_request.side_effect = [
SRR['is_rest'],
SRR['empty_good'],
SRR['end_of_sequence']
]
with pytest.raises(AnsibleExitJson) as exc:
self.get_cli_mock_object().apply()
assert exc.value.args[0]['changed']
@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
def test_rest_cli_options(self, mock_request):
data = dict(self.mock_args())
data['verb'] = 'OPTIONS'
set_module_args(data)
mock_request.side_effect = [
SRR['is_rest'],
SRR['allow'],
SRR['end_of_sequence']
]
with pytest.raises(AnsibleExitJson) as exc:
self.get_cli_mock_object().apply()
assert exc.value.args[0]['changed']
assert 'Allow' in exc.value.args[0]['msg']
@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
def test_negative_connection_error(self, mock_request):
data = dict(self.mock_args())
data['verb'] = 'OPTIONS'
set_module_args(data)
mock_request.side_effect = [
SRR['generic_error'],
SRR['end_of_sequence']
]
with pytest.raises(AnsibleFailJson) as exc:
self.get_cli_mock_object().apply()
msg = "failed to connect to REST over test: ['Expected error']. Use na_ontap_command for non-rest CLI."
assert msg in exc.value.args[0]['msg']
@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
def check_verb(self, verb, mock_request):
data = dict(self.mock_args())
data['verb'] = verb
set_module_args(data)
mock_request.side_effect = [
SRR['is_rest'],
SRR['allow'],
SRR['end_of_sequence']
]
with pytest.raises(AnsibleExitJson) as exc:
self.get_cli_mock_object().apply()
assert exc.value.args[0]['changed']
assert 'Allow' in exc.value.args[0]['msg']
assert mock_request.call_args[0][0] == verb
def test_verbs(self):
for verb in ['POST', 'DELETE', 'PATCH', 'OPTIONS', 'PATCH']:
self.check_verb(verb)
@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
def test_negative_verb(self, mock_request):
data = dict(self.mock_args())
data['verb'] = 'GET'
set_module_args(data)
mock_request.side_effect = [
SRR['is_rest'],
SRR['end_of_sequence']
]
uut = self.get_cli_mock_object()
with pytest.raises(AnsibleFailJson) as exc:
uut.verb = 'INVALID'
uut.run_command()
msg = 'Error: unexpected verb INVALID'
assert msg in exc.value.args[0]['msg']
@patch('ansible_collections.netapp.ontap.plugins.module_utils.netapp.OntapRestAPI.send_request')
def test_negative_error(self, mock_request):
data = dict(self.mock_args())
data['verb'] = 'GET'
set_module_args(data)
mock_request.side_effect = [
SRR['is_rest'],
SRR['generic_error'],
SRR['end_of_sequence']
]
with pytest.raises(AnsibleFailJson) as exc:
main()
msg = 'Error: Expected error'
assert msg in exc.value.args[0]['msg']
| venv/lib/python3.6/site-packages/ansible_collections/netapp/ontap/tests/unit/plugins/modules/test_na_ontap_rest_cli.py | 6,968 | Exception class to be raised by module.exit_json and caught by the test case
Exception class to be raised by module.fail_json and caught by the test case
Unit tests for na_ontap_job_schedule
function to patch over exit_json; package return data into an exception
function to patch over fail_json; package return data into an exception
prepare arguments so that they will be picked up during module creation
required arguments are reported as errors
unit tests for Ansible module: na_ontap_rest_cli
(c) 2019, NetApp, Inc GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) module under test REST API canned responses when mocking send_request common responses module specific response pylint: disable=protected-access pylint: disable=unused-argument pylint: disable=unused-argument For rest, mocking is achieved through side_effect | 873 | en | 0.760083 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Some common SessionRunHook classes.
@@
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import six
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import session_run_hook
from tensorflow.contrib.learn.python.learn.session_run_hook import SessionRunArgs
from tensorflow.contrib.learn.python.learn.summary_writer_cache import SummaryWriterCache
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training_util
class LoggingTensorHook(session_run_hook.SessionRunHook):
"""Prints given tensors every N iteration.
The tensors will be printed to the log, with `INFO` severity.
"""
def __init__(self, tensors, every_n_iter=100):
"""Initializes a LoggingHook monitor.
Args:
tensors: `dict` of tag to tensors/names or
`iterable` of tensors/names.
every_n_iter: `int`, print every N iteration.
Raises:
ValueError: if `every_n_iter` is non-positive.
"""
if every_n_iter <= 0:
raise ValueError("Invalid every_n_iter=%s." % every_n_iter)
if not isinstance(tensors, dict):
tensors = {item: item for item in tensors}
self._tensors = tensors
self._every_n_iter = every_n_iter
def begin(self):
self._iter_count = 0
# Convert names to tensors if given
self._current_tensors = {tag: _as_graph_element(tensor)
for (tag, tensor) in self._tensors.items()}
def before_run(self, run_context): # pylint: disable=unused-argument
if self._iter_count % self._every_n_iter == 0:
return SessionRunArgs(self._current_tensors)
else:
return None
def after_run(self, run_context, run_values):
_ = run_context
if self._iter_count % self._every_n_iter == 0:
stats = []
for tag in sorted(self._current_tensors.keys()):
stats.append("%s = %s" % (tag, run_values.results[tag]))
logging.info("%s", ", ".join(stats))
self._iter_count += 1
class StopAtStepHook(session_run_hook.SessionRunHook):
"""Monitor to request stop at a specified step."""
def __init__(self, num_steps=None, last_step=None):
"""Create a StopAtStep Hook.
This hook requests stop after either a number of steps have been
executed or a last step has been reached. Only of the two options can be
specified.
if `num_steps` is specified, it indicates the number of steps to execute
after `begin()` is called. If instead `last_step` is specified, it
indicates the last step we want to execute, as passed to the `after_run()`
call.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError("One of num_steps or last_step must be specified.")
if num_steps is not None and last_step is not None:
raise ValueError("Only one of num_steps or last_step can be specified.")
self._num_steps = num_steps
self._last_step = last_step
def begin(self):
self._global_step_tensor = contrib_variables.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError("Global step should be created to use StopAtStepHook.")
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_values.results
if self._last_step is None:
self._last_step = global_step + self._num_steps - 1
if global_step >= self._last_step:
run_context.request_stop()
class CheckpointSaverHook(session_run_hook.SessionRunHook):
"""Saves checkpoints every N steps or seconds."""
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None):
"""Initialize CheckpointSaverHook monitor.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
"""
logging.info("Create CheckpointSaverHook.")
self._saver = saver
self._checkpoint_dir = checkpoint_dir
self._summary_writer = SummaryWriterCache.get(checkpoint_dir)
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._save_secs = save_secs
self._save_steps = save_steps
self._last_saved_time = None
self._last_saved_step = None
if save_steps is None and save_secs is None:
raise ValueError("Either save_steps or save_secs should be provided")
if (save_steps is not None) and (save_secs is not None):
raise ValueError("Can not provide both save_steps and save_secs.")
def begin(self):
self._last_saved_time = None
self._last_saved_step = None
self._global_step_tensor = contrib_variables.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use CheckpointSaverHook.")
def before_run(self, run_context): # pylint: disable=unused-argument
if self._last_saved_time is None:
# Write graph in the first call.
training_util.write_graph(
ops.get_default_graph().as_graph_def(add_shapes=True),
self._checkpoint_dir,
"graph.pbtxt")
self._summary_writer.add_graph(ops.get_default_graph())
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_values.results
if self._last_saved_time is None:
self._save(global_step, run_context.session)
if self._save_steps is not None:
if global_step >= self._last_saved_step + self._save_steps:
self._save(global_step, run_context.session)
if self._save_secs is not None:
if time.time() >= self._last_saved_time + self._save_secs:
self._save(global_step, run_context.session)
def end(self, session):
last_step = session.run(contrib_variables.get_global_step())
self._save(last_step, session)
def _save(self, step, session):
"""Saves the latest checkpoint."""
if step == self._last_saved_step:
return
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
self._last_saved_time = time.time()
self._last_saved_step = step
if self._saver is None:
self._scaffold.saver.save(session, self._save_path, global_step=step)
else:
self._saver.save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
class StepCounterHook(session_run_hook.SessionRunHook):
  """Steps per second monitor."""

  def __init__(self, every_n_steps=100, output_dir=None, summary_writer=None):
    # Tag under which the steps/sec scalar is written to the event file.
    self._summary_tag = "global_step/sec"
    self._every_n_steps = every_n_steps
    self._summary_writer = summary_writer
    if summary_writer is None and output_dir:
      self._summary_writer = SummaryWriterCache.get(output_dir)

  def begin(self):
    """Resets timing state and resolves the global step tensor.

    Raises:
      RuntimeError: If no global step tensor exists in the default graph.
    """
    self._last_reported_time = None
    self._last_reported_step = None
    self._global_step_tensor = contrib_variables.get_global_step()
    if self._global_step_tensor is None:
      raise RuntimeError(
          "Global step should be created to use StepCounterHook.")

  def before_run(self, run_context):  # pylint: disable=unused-argument
    """Requests the global step value on every run."""
    return SessionRunArgs(self._global_step_tensor)

  def after_run(self, run_context, run_values):
    """Emits a steps/sec summary once at least `every_n_steps` have passed."""
    _ = run_context
    if not self._summary_writer:
      return
    global_step = run_values.results
    current_time = time.time()
    if self._last_reported_time is None:
      # First call: just record a baseline; nothing to report yet.
      self._last_reported_step = global_step
      self._last_reported_time = current_time
    else:
      if global_step >= self._every_n_steps + self._last_reported_step:
        added_steps = global_step - self._last_reported_step
        elapsed_time = current_time - self._last_reported_time
        steps_per_sec = added_steps / elapsed_time
        summary = Summary(value=[Summary.Value(
            tag=self._summary_tag, simple_value=steps_per_sec)])
        self._summary_writer.add_summary(summary, global_step)
        self._last_reported_step = global_step
        self._last_reported_time = current_time
class NanLossDuringTrainingError(RuntimeError):
  """Raised when the monitored loss becomes NaN (see `NanTensorHook`)."""

  def __str__(self):
    return "NaN loss during training."
class NanTensorHook(session_run_hook.SessionRunHook):
  """NaN Loss monitor.

  Monitors loss and stops training if loss is NaN.
  Can either fail with exception or just stop training.
  """

  def __init__(self, loss_tensor, fail_on_nan_loss=True):
    """Initializes NanLoss monitor.

    Args:
      loss_tensor: `Tensor`, the loss tensor.
      fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
    """
    self._loss_tensor = loss_tensor
    self._fail_on_nan_loss = fail_on_nan_loss

  def before_run(self, run_context):  # pylint: disable=unused-argument
    """Requests the loss value on every run."""
    return SessionRunArgs(self._loss_tensor)

  def after_run(self, run_context, run_values):
    """Raises or requests stop when the fetched loss is NaN."""
    if np.isnan(run_values.results):
      failure_message = "Model diverged with loss = NaN."
      if self._fail_on_nan_loss:
        logging.error(failure_message)
        raise NanLossDuringTrainingError
      else:
        logging.warning(failure_message)
        # We don't raise an error but we request stop without an exception.
        run_context.request_stop()
class SummarySaverHook(session_run_hook.SessionRunHook):
  """Saves summaries every N steps."""

  def __init__(self,
               save_steps=100,
               output_dir=None,
               summary_writer=None,
               scaffold=None,
               summary_op=None):
    """Initializes a `SummarySaver` monitor.

    Args:
      save_steps: `int`, save summaries every N steps. See `EveryN`.
      output_dir: `string`, the directory to save the summaries to. Only used
        if no `summary_writer` is supplied.
      summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
        one will be created accordingly.
      scaffold: `Scaffold` to get summary_op if it's not provided.
      summary_op: `Tensor` of type `string`. A serialized `Summary` protocol
        buffer, as output by TF summary methods like `scalar_summary` or
        `merge_all_summaries`.
    """
    # TODO(ipolosukhin): Implement every N seconds.
    self._summary_op = summary_op
    self._summary_writer = summary_writer
    if summary_writer is None and output_dir:
      self._summary_writer = SummaryWriterCache.get(output_dir)
    self._scaffold = scaffold
    self._save_steps = save_steps
    # TODO(mdan): Throw an error if output_dir and summary_writer are None.

  def begin(self):
    """Resets save bookkeeping and resolves the global step tensor.

    Raises:
      RuntimeError: If no global step tensor exists in the default graph.
    """
    self._last_saved_step = None
    # Always fetch a summary on the very first run.
    self._request_summary = True
    self._global_step_tensor = contrib_variables.get_global_step()
    if self._global_step_tensor is None:
      raise RuntimeError(
          "Global step should be created to use SummarySaverHook.")

  def before_run(self, run_context):  # pylint: disable=unused-argument
    """Requests the global step and, when a save is due, the summary tensor."""
    requests = {"global_step": self._global_step_tensor}
    if self._request_summary:
      # Prefer the explicitly supplied summary_op; otherwise fall back to
      # the scaffold's summary_op.
      if self._summary_op is not None:
        requests["summary"] = self._summary_op
      elif self._scaffold.summary_op is not None:
        requests["summary"] = self._scaffold.summary_op
    return SessionRunArgs(requests)

  def after_run(self, run_context, run_values):
    """Writes the fetched summary (if any) and schedules the next fetch."""
    _ = run_context
    if not self._summary_writer:
      return
    global_step = run_values.results["global_step"]
    if self._last_saved_step is None:
      # First call: mark the session start in the event file.
      self._summary_writer.add_session_log(
          SessionLog(status=SessionLog.START), global_step)
    if self._request_summary:
      self._last_saved_step = global_step
      if "summary" in run_values.results:
        self._summary_writer.add_summary(run_values.results["summary"],
                                         global_step)
    # Decide whether the *next* run should fetch a summary.
    self._request_summary = (
        global_step >= self._last_saved_step + self._save_steps - 1)

  def end(self, session=None):
    """Flushes any buffered summaries to disk."""
    if self._summary_writer:
      self._summary_writer.flush()
def _as_graph_element(obj):
  """Retrieves Graph element.

  Args:
    obj: Either a graph element (has a `graph` attribute) belonging to the
      default graph, or a string name to resolve in the default graph.

  Returns:
    The corresponding graph element.

  Raises:
    ValueError: If `obj` belongs to a different graph, or if a bare op name
      is ambiguous because the op has multiple outputs.
  """
  graph = ops.get_default_graph()
  if not isinstance(obj, six.string_types):
    # Already a graph element; just validate it lives in the default graph.
    if not hasattr(obj, "graph") or obj.graph != graph:
      raise ValueError("Passed %s should have graph attribute that is equal "
                       "to current graph %s." % (obj, graph))
    return obj
  if ":" in obj:
    # Explicit tensor name such as "op:0".
    element = graph.as_graph_element(obj)
  else:
    # Bare op name: resolve its first output.
    element = graph.as_graph_element(obj + ":0")
    # Check that there is no :1 (e.g. it's single output).
    try:
      graph.as_graph_element(obj + ":1")
    except (KeyError, ValueError):
      pass
    else:
      raise ValueError("Name %s is ambiguous, "
                       "as this `Operation` has multiple outputs "
                       "(at least 2)." % obj)
  return element
| tensorflow/contrib/learn/python/learn/basic_session_run_hooks.py | 14,433 | Saves checkpoints every N steps or seconds.
Prints given tensors every N iteration.
The tensors will be printed to the log, with `INFO` severity.
NaN Loss monitor.
Monitors loss and stops training if loss is NaN.
Can either fail with exception or just stop training.
Steps per second monitor.
Monitor to request stop at a specified step.
Saves summaries every N steps.
Initializes a LoggingHook monitor.
Args:
tensors: `dict` of tag to tensors/names or
`iterable` of tensors/names.
every_n_iter: `int`, print every N iteration.
Raises:
ValueError: if `every_n_iter` is non-positive.
Create a StopAtStep Hook.
This hook requests stop after either a number of steps have been
executed or a last step has been reached. Only of the two options can be
specified.
if `num_steps` is specified, it indicates the number of steps to execute
after `begin()` is called. If instead `last_step` is specified, it
indicates the last step we want to execute, as passed to the `after_run()`
call.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
Initialize CheckpointSaverHook monitor.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
Initializes NanLoss monitor.
Args:
loss_tensor: `Tensor`, the loss tensor.
fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
Initializes a `SummarySaver` monitor.
Args:
save_steps: `int`, save summaries every N steps. See `EveryN`.
output_dir: `string`, the directory to save the summaries to. Only used
if no `summary_writer` is supplied.
summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
one will be created accordingly.
scaffold: `Scaffold` to get summary_op if it's not provided.
summary_op: `Tensor` of type `string`. A serialized `Summary` protocol
buffer, as output by TF summary methods like `scalar_summary` or
`merge_all_summaries`.
Retrieves Graph element.
Saves the latest checkpoint.
Some common SessionRunHook classes.
@@
Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== Convert names to tensors if given pylint: disable=unused-argument pylint: disable=unused-argument pylint: disable=unused-argument Write graph in the first call. pylint: disable=unused-argument pylint: disable=unused-argument We don't raise an error but we request stop without an exception. TODO(ipolosukhin): Implement every N seconds. TODO(mdan): Throw an error if output_dir and summary_writer are None. pylint: disable=unused-argument Check that there is no :1 (e.g. it's single output). | 3,544 | en | 0.78129 |
import unittest
from my_lambdata.assignment1 import WrangledFrame
class TestWrangledFrame(unittest.TestCase):
    """Tests for WrangledFrame.add_state_names."""

    def test_add_state_names(self):
        """add_state_names should append a 'name' column of full state names."""
        wf = WrangledFrame({"abbrev": ["CA", "CO", "CT", "DC", "TX"]})
        # Fixed: a stray breakpoint() call was left here, which drops any
        # test run into the interactive debugger and hangs CI.
        wf.add_state_names()
        # ensure there is a "name" column
        self.assertEqual(list(wf.columns), ['abbrev', 'name'])
        # ensure the values of WF are specific classes/values
        # (string, "California")
        self.assertEqual(wf["name"][0], "California")
        self.assertEqual(wf["abbrev"][0], "CA")
if __name__ == '__main__':
unittest.main()
| tests/wrangled_test.py | 623 | ensure there is a "name" column ensure the values of WF are specific classes/values (string, "California") | 106 | en | 0.653227 |
# -*- coding:utf-8 -*-
"""
Blog system.
"""
import pymysql

# Let PyMySQL masquerade as the MySQLdb module so consumers that import
# MySQLdb (e.g. a Django MySQL backend) transparently use PyMySQL.
pymysql.install_as_MySQLdb()
| end/nebulablogs/__init__.py | 95 | 博客系统。
-*- coding:utf-8 -*- | 28 | zh | 0.797089 |
# -*- coding: utf-8 -*-
import pytest
import tempfile
from jsonschema import ValidationError
from rasa.nlu import training_data
from rasa.nlu.convert import convert_training_data
from rasa.nlu.extractors.mitie_entity_extractor import MitieEntityExtractor
from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
from rasa.nlu.training_data import TrainingData
from rasa.nlu.training_data.formats import MarkdownReader
from rasa.nlu.training_data.formats.rasa import validate_rasa_nlu_data
from rasa.nlu.training_data.loading import guess_format, UNK, load_data
from rasa.nlu.training_data.util import get_file_format
import rasa.utils.io as io_utils
def test_example_training_data_is_valid():
    # The bundled demo training file must satisfy the Rasa NLU JSON schema.
    demo_json = "data/examples/rasa/demo-rasa.json"
    data = io_utils.read_json_file(demo_json)
    validate_rasa_nlu_data(data)
@pytest.mark.parametrize(
"invalid_data",
[
{"wrong_top_level": []},
["this is not a toplevel dict"],
{
"rasa_nlu_data": {
"common_examples": [{"intent": "some example without text"}]
}
},
{
"rasa_nlu_data": {
"common_examples": [
{
"text": "mytext",
"entities": [{"start": "INVALID", "end": 0, "entity": "x"}],
}
]
}
},
],
)
def test_validation_is_throwing_exceptions(invalid_data):
with pytest.raises(ValidationError):
validate_rasa_nlu_data(invalid_data)
def test_luis_data():
td = training_data.load_data("data/examples/luis/demo-restaurants.json")
assert not td.is_empty()
assert len(td.entity_examples) == 8
assert len(td.intent_examples) == 28
assert len(td.training_examples) == 28
assert td.entity_synonyms == {}
assert td.intents == {"affirm", "goodbye", "greet", "inform"}
assert td.entities == {"location", "cuisine"}
def test_wit_data():
td = training_data.load_data("data/examples/wit/demo-flights.json")
assert not td.is_empty()
assert len(td.entity_examples) == 4
assert len(td.intent_examples) == 1
assert len(td.training_examples) == 4
assert td.entity_synonyms == {}
assert td.intents == {"flight_booking"}
assert td.entities == {"location", "datetime"}
def test_dialogflow_data():
td = training_data.load_data("data/examples/dialogflow/")
assert not td.is_empty()
assert len(td.entity_examples) == 5
assert len(td.intent_examples) == 24
assert len(td.training_examples) == 24
assert len(td.lookup_tables) == 2
assert td.intents == {"affirm", "goodbye", "hi", "inform"}
assert td.entities == {"cuisine", "location"}
non_trivial_synonyms = {k: v for k, v in td.entity_synonyms.items() if k != v}
assert non_trivial_synonyms == {
"mexico": "mexican",
"china": "chinese",
"india": "indian",
}
# The order changes based on different computers hence the grouping
assert {td.lookup_tables[0]["name"], td.lookup_tables[1]["name"]} == {
"location",
"cuisine",
}
assert {
len(td.lookup_tables[0]["elements"]),
len(td.lookup_tables[1]["elements"]),
} == {4, 6}
def test_lookup_table_json():
lookup_fname = "data/test/lookup_tables/plates.txt"
td_lookup = training_data.load_data("data/test/lookup_tables/lookup_table.json")
assert not td_lookup.is_empty()
assert td_lookup.lookup_tables[0]["name"] == "plates"
assert td_lookup.lookup_tables[0]["elements"] == lookup_fname
assert td_lookup.lookup_tables[1]["name"] == "drinks"
assert td_lookup.lookup_tables[1]["elements"] == [
"mojito",
"lemonade",
"sweet berry wine",
"tea",
"club mate",
]
def test_lookup_table_md():
lookup_fname = "data/test/lookup_tables/plates.txt"
td_lookup = training_data.load_data("data/test/lookup_tables/lookup_table.md")
assert not td_lookup.is_empty()
assert td_lookup.lookup_tables[0]["name"] == "plates"
assert td_lookup.lookup_tables[0]["elements"] == lookup_fname
assert td_lookup.lookup_tables[1]["name"] == "drinks"
assert td_lookup.lookup_tables[1]["elements"] == [
"mojito",
"lemonade",
"sweet berry wine",
"tea",
"club mate",
]
@pytest.mark.parametrize(
"files",
[
[
"data/examples/rasa/demo-rasa.json",
"data/examples/rasa/demo-rasa-responses.md",
],
[
"data/examples/rasa/demo-rasa.md",
"data/examples/rasa/demo-rasa-responses.md",
],
],
)
def test_demo_data(files):
from rasa.importers.utils import training_data_from_paths
td = training_data_from_paths(files, language="en")
assert td.intents == {"affirm", "greet", "restaurant_search", "goodbye", "chitchat"}
assert td.entities == {"location", "cuisine"}
assert td.responses == {"I am Mr. Bot", "It's sunny where I live"}
assert len(td.training_examples) == 46
assert len(td.intent_examples) == 46
assert len(td.response_examples) == 4
assert len(td.entity_examples) == 11
assert len(td.nlg_stories) == 2
assert td.entity_synonyms == {
"Chines": "chinese",
"Chinese": "chinese",
"chines": "chinese",
"vegg": "vegetarian",
"veggie": "vegetarian",
}
assert td.regex_features == [
{"name": "greet", "pattern": r"hey[^\s]*"},
{"name": "zipcode", "pattern": r"[0-9]{5}"},
]
@pytest.mark.parametrize(
"filepaths",
[["data/examples/rasa/demo-rasa.md", "data/examples/rasa/demo-rasa-responses.md"]],
)
def test_train_test_split(filepaths):
from rasa.importers.utils import training_data_from_paths
td = training_data_from_paths(filepaths, language="en")
assert td.intents == {"affirm", "greet", "restaurant_search", "goodbye", "chitchat"}
assert td.entities == {"location", "cuisine"}
assert len(td.training_examples) == 46
assert len(td.intent_examples) == 46
td_train, td_test = td.train_test_split(train_frac=0.8)
assert len(td_train.training_examples) == 35
assert len(td_test.training_examples) == 11
@pytest.mark.parametrize(
"files",
[
("data/examples/rasa/demo-rasa.json", "data/test/multiple_files_json"),
("data/examples/rasa/demo-rasa.md", "data/test/multiple_files_markdown"),
],
)
def test_data_merging(files):
td_reference = training_data.load_data(files[0])
td = training_data.load_data(files[1])
assert len(td.entity_examples) == len(td_reference.entity_examples)
assert len(td.intent_examples) == len(td_reference.intent_examples)
assert len(td.training_examples) == len(td_reference.training_examples)
assert td.intents == td_reference.intents
assert td.entities == td_reference.entities
assert td.entity_synonyms == td_reference.entity_synonyms
assert td.regex_features == td_reference.regex_features
def test_markdown_single_sections():
td_regex_only = training_data.load_data(
"data/test/markdown_single_sections/regex_only.md"
)
assert td_regex_only.regex_features == [{"name": "greet", "pattern": r"hey[^\s]*"}]
td_syn_only = training_data.load_data(
"data/test/markdown_single_sections/synonyms_only.md"
)
assert td_syn_only.entity_synonyms == {"Chines": "chinese", "Chinese": "chinese"}
def test_repeated_entities():
data = """
{
"rasa_nlu_data": {
"common_examples" : [
{
"text": "book a table today from 3 to 6 for 3 people",
"intent": "unk",
"entities": [
{
"entity": "description",
"start": 35,
"end": 36,
"value": "3"
}
]
}
]
}
}"""
with tempfile.NamedTemporaryFile(suffix="_tmp_training_data.json") as f:
f.write(data.encode("utf-8"))
f.flush()
td = training_data.load_data(f.name)
assert len(td.entity_examples) == 1
example = td.entity_examples[0]
entities = example.get("entities")
assert len(entities) == 1
tokens = WhitespaceTokenizer().tokenize(example.text)
start, end = MitieEntityExtractor.find_entity(entities[0], example.text, tokens)
assert start == 9
assert end == 10
def test_multiword_entities():
data = """
{
"rasa_nlu_data": {
"common_examples" : [
{
"text": "show me flights to New York City",
"intent": "unk",
"entities": [
{
"entity": "destination",
"start": 19,
"end": 32,
"value": "New York City"
}
]
}
]
}
}"""
with tempfile.NamedTemporaryFile(suffix="_tmp_training_data.json") as f:
f.write(data.encode("utf-8"))
f.flush()
td = training_data.load_data(f.name)
assert len(td.entity_examples) == 1
example = td.entity_examples[0]
entities = example.get("entities")
assert len(entities) == 1
tokens = WhitespaceTokenizer().tokenize(example.text)
start, end = MitieEntityExtractor.find_entity(entities[0], example.text, tokens)
assert start == 4
assert end == 7
def test_nonascii_entities():
data = """
{
"luis_schema_version": "2.0",
"utterances" : [
{
"text": "I am looking for a ßäæ ?€ö) item",
"intent": "unk",
"entities": [
{
"entity": "description",
"startPos": 19,
"endPos": 26
}
]
}
]
}"""
with tempfile.NamedTemporaryFile(suffix="_tmp_training_data.json") as f:
f.write(data.encode("utf-8"))
f.flush()
td = training_data.load_data(f.name)
assert len(td.entity_examples) == 1
example = td.entity_examples[0]
entities = example.get("entities")
assert len(entities) == 1
entity = entities[0]
assert entity["value"] == "ßäæ ?€ö)"
assert entity["start"] == 19
assert entity["end"] == 27
assert entity["entity"] == "description"
def test_entities_synonyms():
data = """
{
"rasa_nlu_data": {
"entity_synonyms": [
{
"value": "nyc",
"synonyms": ["New York City", "nyc", "the big apple"]
}
],
"common_examples" : [
{
"text": "show me flights to New York City",
"intent": "unk",
"entities": [
{
"entity": "destination",
"start": 19,
"end": 32,
"value": "NYC"
}
]
},
{
"text": "show me flights to nyc",
"intent": "unk",
"entities": [
{
"entity": "destination",
"start": 19,
"end": 22,
"value": "nyc"
}
]
}
]
}
}"""
with tempfile.NamedTemporaryFile(suffix="_tmp_training_data.json") as f:
f.write(data.encode("utf-8"))
f.flush()
td = training_data.load_data(f.name)
assert td.entity_synonyms["New York City"] == "nyc"
def cmp_message_list(firsts, seconds):
    """Assert that two message lists contain the same number of entries.

    Args:
        firsts: first list of messages.
        seconds: second list of messages.

    Raises:
        AssertionError: if the two lists differ in length. The message now
            includes both lengths to make test failures diagnosable.
    """
    assert len(firsts) == len(seconds), (
        "Message lists have unequal length ({} vs {})".format(
            len(firsts), len(seconds)
        )
    )
def cmp_dict_list(firsts, seconds):
    """Return True iff both lists hold the same messages (matched by hash).

    Matched entries are removed from ``seconds`` in place, so the caller's
    list is consumed as a side effect.
    """
    if len(firsts) != len(seconds):
        return False

    for first in firsts:
        match_idx = None
        for idx, candidate in enumerate(seconds):
            if hash(first) == hash(candidate):
                match_idx = idx
                break
        if match_idx is None:
            others = ", ".join([e.text for e in seconds])
            assert False, "Failed to find message {} in {}".format(
                first.text, others
            )
        del seconds[match_idx]

    return not seconds
@pytest.mark.parametrize(
"data_file,gold_standard_file,output_format,language",
[
(
"data/examples/wit/demo-flights.json",
"data/test/wit_converted_to_rasa.json",
"json",
None,
),
(
"data/examples/luis/demo-restaurants.json",
"data/test/luis_converted_to_rasa.json",
"json",
None,
),
(
"data/examples/dialogflow/",
"data/test/dialogflow_en_converted_to_rasa.json",
"json",
"en",
),
(
"data/examples/dialogflow/",
"data/test/dialogflow_es_converted_to_rasa.json",
"json",
"es",
),
(
"data/examples/rasa/demo-rasa.md",
"data/test/md_converted_to_json.json",
"json",
None,
),
(
"data/examples/rasa/demo-rasa.json",
"data/test/json_converted_to_md.md",
"md",
None,
),
(
"data/test/training_data_containing_special_chars.json",
"data/test/json_with_special_chars_convered_to_md.md",
"md",
None,
),
],
)
def test_training_data_conversion(
tmpdir, data_file, gold_standard_file, output_format, language
):
out_path = tmpdir.join("rasa_nlu_data.json")
convert_training_data(data_file, out_path.strpath, output_format, language)
td = training_data.load_data(out_path.strpath, language)
assert td.entity_examples != []
assert td.intent_examples != []
gold_standard = training_data.load_data(gold_standard_file, language)
cmp_message_list(td.entity_examples, gold_standard.entity_examples)
cmp_message_list(td.intent_examples, gold_standard.intent_examples)
assert td.entity_synonyms == gold_standard.entity_synonyms
# converting the converted file back to original
# file format and performing the same tests
rto_path = tmpdir.join("data_in_original_format.txt")
convert_training_data(out_path.strpath, rto_path.strpath, "json", language)
rto = training_data.load_data(rto_path.strpath, language)
cmp_message_list(gold_standard.entity_examples, rto.entity_examples)
cmp_message_list(gold_standard.intent_examples, rto.intent_examples)
assert gold_standard.entity_synonyms == rto.entity_synonyms
# If the above assert fails - this can be used
# to dump to the file and diff using git
# with io.open(gold_standard_file) as f:
# f.write(td.as_json(indent=2))
def test_url_data_format():
data = """
{
"rasa_nlu_data": {
"entity_synonyms": [
{
"value": "nyc",
"synonyms": ["New York City", "nyc", "the big apple"]
}
],
"common_examples" : [
{
"text": "show me flights to New York City",
"intent": "unk",
"entities": [
{
"entity": "destination",
"start": 19,
"end": 32,
"value": "NYC"
}
]
}
]
}
}"""
fname = io_utils.create_temporary_file(
data.encode("utf-8"), suffix="_tmp_training_data.json", mode="w+b"
)
data = io_utils.read_json_file(fname)
assert data is not None
validate_rasa_nlu_data(data)
def test_markdown_entity_regex():
r = MarkdownReader()
md = """
## intent:restaurant_search
- i'm looking for a place to eat
- i'm looking for a place in the [north](loc-direction) of town
- show me [chines](cuisine:chinese) restaurants
- show me [chines](22_ab-34*3.A:43er*+?df) restaurants
"""
result = r.reads(md)
assert len(result.training_examples) == 4
first = result.training_examples[0]
assert first.data == {"intent": "restaurant_search"}
assert first.text == "i'm looking for a place to eat"
second = result.training_examples[1]
assert second.data == {
"intent": "restaurant_search",
"entities": [
{"start": 31, "end": 36, "value": "north", "entity": "loc-direction"}
],
}
assert second.text == "i'm looking for a place in the north of town"
third = result.training_examples[2]
assert third.data == {
"intent": "restaurant_search",
"entities": [{"start": 8, "end": 14, "value": "chinese", "entity": "cuisine"}],
}
assert third.text == "show me chines restaurants"
fourth = result.training_examples[3]
assert fourth.data == {
"intent": "restaurant_search",
"entities": [
{"start": 8, "end": 14, "value": "43er*+?df", "entity": "22_ab-34*3.A"}
],
}
assert fourth.text == "show me chines restaurants"
def test_get_file_format():
    # A single JSON training file reports "json".
    fformat = get_file_format("data/examples/luis/demo-restaurants.json")
    assert fformat == "json"

    # A directory of example data also reports "json".
    fformat = get_file_format("data/examples")
    assert fformat == "json"

    # Markdown training files report "md".
    fformat = get_file_format("examples/restaurantbot/data/nlu.md")
    assert fformat == "md"

    # Missing or None paths raise AttributeError.
    with pytest.raises(AttributeError):
        get_file_format("path-does-not-exists")

    with pytest.raises(AttributeError):
        get_file_format(None)
def test_guess_format_from_non_existing_file_path():
    # Unknown/missing paths fall back to the UNK format marker.
    assert guess_format("not existing path") == UNK
def test_load_data_from_non_existing_file():
    # Loading a missing path must fail loudly rather than return empty data.
    with pytest.raises(ValueError):
        load_data("some path")
def test_is_empty():
    # A freshly constructed TrainingData must report itself as empty.
    assert TrainingData().is_empty()
def test_markdown_empty_section():
data = training_data.load_data(
"data/test/markdown_single_sections/empty_section.md"
)
assert data.regex_features == [{"name": "greet", "pattern": r"hey[^\s]*"}]
assert not data.entity_synonyms
assert len(data.lookup_tables) == 1
assert data.lookup_tables[0]["name"] == "chinese"
assert "Chinese" in data.lookup_tables[0]["elements"]
assert "Chines" in data.lookup_tables[0]["elements"]
def test_markdown_not_existing_section():
with pytest.raises(ValueError):
training_data.load_data(
"data/test/markdown_single_sections/not_existing_section.md"
)
| tests/nlu/base/test_training_data.py | 17,950 | -*- coding: utf-8 -*- The order changes based on different computers hence the grouping converting the converted file back to original file format and performing the same tests If the above assert fails - this can be used to dump to the file and diff using git with io.open(gold_standard_file) as f: f.write(td.as_json(indent=2)) | 333 | en | 0.88665 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SasDefinitionCreateParameters(Model):
    """The SAS definition create parameters.

    All required parameters must be populated in order to send to Azure.

    :param template_uri: Required. The SAS definition token template signed
     with an arbitrary key. Tokens created according to the SAS definition
     will have the same properties as the template.
    :type template_uri: str
    :param sas_type: Required. The type of SAS token the SAS definition will
     create. Possible values include: 'account', 'service'
    :type sas_type: str or ~storage.models.SasTokenType
    :param validity_period: Required. The validity period of SAS tokens
     created according to the SAS definition.
    :type validity_period: str
    :param sas_definition_attributes: The attributes of the SAS definition.
    :type sas_definition_attributes: ~storage.models.SasDefinitionAttributes
    :param tags: Application specific metadata in the form of key-value pairs.
    :type tags: dict[str, str]
    """

    _validation = {
        'template_uri': {'required': True},
        'sas_type': {'required': True},
        'validity_period': {'required': True},
    }

    _attribute_map = {
        'template_uri': {'key': 'templateUri', 'type': 'str'},
        'sas_type': {'key': 'sasType', 'type': 'str'},
        'validity_period': {'key': 'validityPeriod', 'type': 'str'},
        'sas_definition_attributes': {'key': 'attributes', 'type': 'SasDefinitionAttributes'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, **kwargs):
        super(SasDefinitionCreateParameters, self).__init__(**kwargs)
        # Copy each recognized keyword argument onto the instance, defaulting
        # missing ones to None; required-field checks are handled separately
        # via the _validation table.
        for attr_name in ('template_uri', 'sas_type', 'validity_period',
                          'sas_definition_attributes', 'tags'):
            setattr(self, attr_name, kwargs.get(attr_name, None))
| sdk/keyvault/azure-keyvault/azure/keyvault/v7_3_preview/models/sas_definition_create_parameters.py | 2,461 | The SAS definition create parameters.
All required parameters must be populated in order to send to Azure.
:param template_uri: Required. The SAS definition token template signed
with an arbitrary key. Tokens created according to the SAS definition
will have the same properties as the template.
:type template_uri: str
:param sas_type: Required. The type of SAS token the SAS definition will
create. Possible values include: 'account', 'service'
:type sas_type: str or ~storage.models.SasTokenType
:param validity_period: Required. The validity period of SAS tokens
created according to the SAS definition.
:type validity_period: str
:param sas_definition_attributes: The attributes of the SAS definition.
:type sas_definition_attributes: ~storage.models.SasDefinitionAttributes
:param tags: Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- | 1,343 | en | 0.548296 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from iris.test_case import *
class Test(BaseTest):

    def __init__(self, app):
        BaseTest.__init__(self, app)
        # Short description surfaced by the Iris test runner.
        self.meta = 'This is a test for checking private browsing navigation'

    def run(self):
        """Open a private window and verify page navigation works inside it."""
        url = 'https://www.google.com/?hl=EN'
        private_browsing_image = 'private_browsing.png'
        google_search_image = 'google_search.png'

        # check if incognito mode works
        new_private_window()
        expected_1 = exists(private_browsing_image, 10)
        assert_true(self, expected_1, 'Find private browsing image')

        # check basic_url in incognito mode
        navigate(url)
        expected_2 = exists(google_search_image, 10)
        assert_true(self, expected_2, 'Find google search image')
| iris/tests/experiments/private_browsing_mode.py | 954 | This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. check if incognito mode works check basic_url in incognito mode | 256 | en | 0.875146 |
from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import *
app = Flask(__name__)

# LINE BOT info
# NOTE(review): the channel access token and channel secret are hard-coded
# in source. They should be loaded from environment variables, and these
# committed values should be rotated — anyone with repo access can use them.
line_bot_api = LineBotApi('QCynFfsDk7My1YN72sVQyvk6ArYkD2TUQW/pUxUQqllnGFNcqjZ8tKC+qMcVa2u4Lg1WmdUVLcS124tweaXtcVWLmK/thFH1NFUZL/Olev6ugLeKG4VUVd0ee8VUUgnrqqCZD+ZBpD6j61TRW2eJEgdB04t89/1O/w1cDnyilFU=')
handler = WebhookHandler('f3b5d7b57ef1f4d1277aecd7f045db3d')
@app.route("/callback", methods=['POST'])
def callback():
    """LINE webhook endpoint: verify the signature, then dispatch events.

    Returns:
        'OK' on success; aborts with HTTP 400 if the signature is invalid.
    """
    signature = request.headers['X-Line-Signature']
    body = request.get_data(as_text=True)
    app.logger.info("Request body: " + body)
    print(body)
    try:
        handler.handle(body, signature)
    except InvalidSignatureError:
        # Signature does not match the channel secret -> reject the request.
        abort(400)
    return 'OK'
# Message event
@handler.add(MessageEvent)
def handle_message(event):
    """Echo the received message text back to the sender.

    Args:
        event: LINE `MessageEvent`. Reads `event.message.text`, so this
            assumes a text message — TODO confirm non-text message events
            are not routed here (the handler is registered for all
            MessageEvent types).
    """
    # Fixed: removed unused locals `message_type` and `user_id` that were
    # assigned but never read.
    reply_token = event.reply_token
    message = event.message.text
    line_bot_api.reply_message(reply_token, TextSendMessage(text=message))
import os

if __name__ == "__main__":
    # Bind to the port supplied by the environment (e.g. Heroku); default 80.
    port = int(os.environ.get('PORT', 80))
    app.run(host='0.0.0.0', port=port)
#!/usr/bin/env python3
# still in development
#
import asyncio
import websockets
import json
import requests
# REST path for posting events on a sealog server.
eventsAPIPath = '/api/v1/events'
# Local (receiving) sealog server connection settings.
localServerIP = '0.0.0.0'
localServerAPIPort = '8000'
localServerWSPort = '8000'
localServerPath = '/sealog-server'
# SECURITY(review): JWT auth tokens are hardcoded in source; they should be
# rotated and supplied via environment/config, not committed.
localToken = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjU5ODFmMTY3MjEyYjM0OGFlZDdmYTlmNSIsInNjb3BlIjpbImFkbWluIiwiZXZlbnRfbWFuYWdlciIsImV2ZW50X2xvZ2dlciIsImV2ZW50X3dhdGNoZXIiXSwiaWF0IjoxNTI1MDE0NDE3fQ.D8ja66bnLxJ3bsJlaKRtOquu8XbibjNCyFxJpI7vafc'
localClientWSID = 'localSealogReceive'
# Remote (source) sealog server connection settings.
remoteServerIP = '162.243.201.175'
remoteServerAPIPort = '80'
remoteServerWSPort = '8000'
remoteServerPath = '/sealog-server'
remoteToken = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjU5ODFmMTY3MjEyYjM0OGFlZDdmYTlmNSIsInNjb3BlIjpbImFkbWluIiwiZXZlbnRfbWFuYWdlciIsImV2ZW50X2xvZ2dlciIsImV2ZW50X3dhdGNoZXIiXSwiaWF0IjoxNTI1MDEzNTUxfQ.8X-fBRUHdrwtkTLcOFAsW-vvvqCzmkZKM2gQgHNkBKk"
remoteClientWSID = 'remoteSealogReceive'
# Handshake message: authenticates and subscribes to the newEvents feed.
hello = {
    'type': 'hello',
    'id': remoteClientWSID,
    'auth': {
        'headers': {
            'authorization': remoteToken
        }
    },
    'version': '2',
    'subs': ['/ws/status/newEvents']
}
# Keep-alive reply sent whenever the server pings us.
ping = {
    'type':'ping',
    'id':remoteClientWSID
}
# HTTP auth headers for the two REST endpoints.
localHeaders = {'authorization': localToken}
remoteHeaders = {'authorization': remoteToken}
async def eventlog():
    """Subscribes to the remote sealog server's websocket event feed and
    mirrors every published event into the local sealog server's REST API.

    Runs until the connection drops or an unexpected error occurs; errors
    are printed (best-effort relay, matching the original behavior).
    """
    try:
        async with websockets.connect('ws://' + remoteServerIP + ':' + remoteServerWSPort) as websocket:
            # Authenticate and subscribe to /ws/status/newEvents.
            await websocket.send(json.dumps(hello))
            while True:
                event = await websocket.recv()
                eventObj = json.loads(event)
                print("eventObj:", eventObj)
                # Bug fix: eventObj['type'] raised KeyError for frames with
                # no 'type' key, which the outer except swallowed -- silently
                # terminating the relay. .get() skips such frames instead.
                msg_type = eventObj.get('type')
                if msg_type == 'ping':
                    # Answer server pings to keep the subscription alive.
                    await websocket.send(json.dumps(ping))
                elif msg_type == 'pub':
                    # NOTE(review): requests.post blocks the event loop; fine
                    # for this single-connection relay, but an async HTTP
                    # client would be preferable.
                    r = requests.post('http://' + localServerIP + ':' + localServerAPIPort + localServerPath + eventsAPIPath, headers=localHeaders, data=json.dumps(eventObj['message']))
                    print(r.text)
    except Exception as error:
        print(error)
asyncio.get_event_loop().run_until_complete(eventlog())
| misc/sealog_repeater_receive.py | 2,257 | !/usr/bin/env python3 still in development end of repeat | 56 | en | 0.672453 |
"""
https://leetcode.com/problems/powerful-integers/
Given two positive integers x and y, an integer is powerful if it is equal to x^i + y^j for some integers i >= 0 and j >= 0.
Return a list of all powerful integers that have value less than or equal to bound.
You may return the answer in any order. In your answer, each value should occur at most once.
Example 1:
Input: x = 2, y = 3, bound = 10
Output: [2,3,4,5,7,9,10]
Explanation:
2 = 2^0 + 3^0
3 = 2^1 + 3^0
4 = 2^0 + 3^1
5 = 2^1 + 3^1
7 = 2^2 + 3^1
9 = 2^3 + 3^0
10 = 2^0 + 3^2
Example 2:
Input: x = 3, y = 5, bound = 15
Output: [2,4,6,8,10,14]
Note:
1 <= x <= 100
1 <= y <= 100
0 <= bound <= 10^6
"""
# time complexity: O(log_x(bound) * log_y(bound)); space: O(log_x(bound) + log_y(bound))
class Solution:
    """LeetCode 970: find all distinct values x**i + y**j that are <= bound."""

    def powerfulIntegers(self, x: int, y: int, bound: int) -> List[int]:
        """Return the distinct powerful integers no greater than bound.

        Each power list has at most log_base(bound) entries (a single entry
        when the base is 1), so the pairwise sum runs in
        O(log_x(bound) * log_y(bound)) time.
        """
        def powers(base: int) -> List[int]:
            # All base**k <= bound, starting at base**0 == 1. A base of 1
            # contributes only the single value 1. A running product replaces
            # the original's repeated base**i exponentiation.
            vals = [1]
            cur = base
            while base > 1 and cur <= bound:
                vals.append(cur)
                cur *= base
            return vals

        # Distinct loop variables (xp/yp) avoid the original's shadowing of
        # the parameters x and y inside the comprehension.
        return list({xp + yp
                     for xp in powers(x)
                     for yp in powers(y)
                     if xp + yp <= bound})
Given two positive integers x and y, an integer is powerful if it is equal to x^i + y^j for some integers i >= 0 and j >= 0.
Return a list of all powerful integers that have value less than or equal to bound.
You may return the answer in any order. In your answer, each value should occur at most once.
Example 1:
Input: x = 2, y = 3, bound = 10
Output: [2,3,4,5,7,9,10]
Explanation:
2 = 2^0 + 3^0
3 = 2^1 + 3^0
4 = 2^0 + 3^1
5 = 2^1 + 3^1
7 = 2^2 + 3^1
9 = 2^3 + 3^0
10 = 2^0 + 3^2
Example 2:
Input: x = 3, y = 5, bound = 15
Output: [2,4,6,8,10,14]
Note:
1 <= x <= 100
1 <= y <= 100
0 <= bound <= 10^6
time complexity: O(n), space complexity: O(n^0.5) | 714 | en | 0.834791 |
# Auto-generated IronPython stub for the Revit API enum; the dunder bodies are
# placeholders -- the real implementation lives in the .NET assembly.
class RenderNodeAction(Enum,IComparable,IFormattable,IConvertible):
 """
 Enumerated actions for processing a render node during custom export.
 enum RenderNodeAction,values: Proceed (0),Skip (1)
 """
 def __eq__(self,*args):
  """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
  pass
 def __format__(self,*args):
  """ __format__(formattable: IFormattable,format: str) -> str """
  pass
 def __ge__(self,*args):
  pass
 def __gt__(self,*args):
  pass
 def __init__(self,*args):
  """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
  pass
 def __le__(self,*args):
  pass
 def __lt__(self,*args):
  pass
 def __ne__(self,*args):
  pass
 def __reduce_ex__(self,*args):
  pass
 def __str__(self,*args):
  pass
 # Enum members (populated by the runtime; None in the stub).
 Proceed=None
 Skip=None
 # Underlying integer value of the enum instance.
 value__=None
| stubs.min/Autodesk/Revit/DB/__init___parts/RenderNodeAction.py | 968 | Enumerated actions for processing a render node during custom export.
enum RenderNodeAction,values: Proceed (0),Skip (1)
x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y
__format__(formattable: IFormattable,format: str) -> str
x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature | 451 | en | 0.429152 |
import numpy as np
import torch
from dataclasses import dataclass
from typing import List
from jiant.tasks.core import (
BaseExample,
BaseTokenizedExample,
BaseDataRow,
BatchMixin,
GlueMixin,
Task,
TaskTypes,
)
from jiant.tasks.lib.templates.shared import double_sentence_featurize, labels_to_bimap
from jiant.utils.python.io import read_jsonl
@dataclass
class Example(BaseExample):
    """A raw WNLI premise/hypothesis pair with its string label."""

    guid: str
    input_premise: str
    input_hypothesis: str
    label: str

    def tokenize(self, tokenizer):
        """Tokenizes both sentences and maps the label to its integer id."""
        premise_tokens = tokenizer.tokenize(self.input_premise)
        hypothesis_tokens = tokenizer.tokenize(self.input_hypothesis)
        return TokenizedExample(
            guid=self.guid,
            input_premise=premise_tokens,
            input_hypothesis=hypothesis_tokens,
            label_id=WnliTask.LABEL_TO_ID[self.label],
        )
@dataclass
class TokenizedExample(BaseTokenizedExample):
    """A tokenized WNLI pair, ready to be featurized into model inputs."""

    guid: str
    input_premise: List
    input_hypothesis: List
    label_id: int

    def featurize(self, tokenizer, feat_spec):
        """Packs the two token lists into a single DataRow feature set."""
        tokens_a = self.input_premise
        tokens_b = self.input_hypothesis
        return double_sentence_featurize(
            guid=self.guid,
            input_tokens_a=tokens_a,
            input_tokens_b=tokens_b,
            label_id=self.label_id,
            tokenizer=tokenizer,
            feat_spec=feat_spec,
            data_row_class=DataRow,
        )
@dataclass
class DataRow(BaseDataRow):
    """A single featurized example as numpy arrays plus its token list."""
    guid: str
    # Token ids, attention mask, and segment ids for the packed pair.
    input_ids: np.ndarray
    input_mask: np.ndarray
    segment_ids: np.ndarray
    label_id: int
    tokens: list
@dataclass
class Batch(BatchMixin):
    """A batch of DataRows collated into torch tensors."""
    input_ids: torch.LongTensor
    input_mask: torch.LongTensor
    segment_ids: torch.LongTensor
    label_id: torch.LongTensor
    tokens: list
class WnliTask(GlueMixin, Task):
    """GLUE WNLI: binary classification over premise/hypothesis pairs."""

    Example = Example
    TokenizedExample = Example
    DataRow = DataRow
    Batch = Batch

    TASK_TYPE = TaskTypes.CLASSIFICATION
    LABELS = ["0", "1"]
    LABEL_TO_ID, ID_TO_LABEL = labels_to_bimap(LABELS)

    def get_train_examples(self):
        """Reads the training split from self.train_path."""
        return self._create_examples(lines=read_jsonl(self.train_path), set_type="train")

    def get_val_examples(self):
        """Reads the validation split from self.val_path."""
        return self._create_examples(lines=read_jsonl(self.val_path), set_type="val")

    def get_test_examples(self):
        """Reads the test split from self.test_path."""
        return self._create_examples(lines=read_jsonl(self.test_path), set_type="test")

    @classmethod
    def _create_examples(cls, lines, set_type):
        """Builds Examples; test rows get the last label as a placeholder."""
        # NOTE: get_glue_preds() is dependent on this guid format.
        return [
            Example(
                guid="%s-%s" % (set_type, idx),
                input_premise=entry["premise"],
                input_hypothesis=entry["hypothesis"],
                label=entry["label"] if set_type != "test" else cls.LABELS[-1],
            )
            for idx, entry in enumerate(lines)
        ]
| jiant/tasks/lib/wnli.py | 2,829 | NOTE: get_glue_preds() is dependent on this guid format. | 56 | en | 0.472587 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""External task balancer.
Overall architecture is:
1. Users interact with clients.
2. Clients make requests against the frontend's REST API.
3. The FE makes a REST call against a worker or worker pool identified by
gcb_external_task_balancer_worker_url. The FE provisions a unique token,
generates a Task instance, and dispatches a REST request to the worker or
worker pool.
4. The worker or worker pool exposes a REST API for use by the FE. Worker
responses contain the name of the worker so the FE can poll a specific worker
for results using the (ticket, name) combination. Workers are in charge both
of doing work and of cleaning up their results. Clients do not talk to
workers directly.
To enable, set up a pool of workers behind a single URL. For example, this might
be a set of machines behind a balancer on GCE or an AWS ELB. Next, set
gcb_external_task_balancer_rest_enabled to True and set
gcb_external_task_balancer_worker_url to the URL of your worker pool. Secure
communication if desired, and write a client against the REST API this module
exposes.
This implementation has the following big limitations:
1. It is insecure. Currently there is no token exchange/validation at the API
level, so anyone who gets a ticket (for example, by listening to HTTP
traffic between clients and the FE) can issue API calls.
2. There is no XSSI/XSRF protection. Note that exposed endpoints will 404 by
default because gcb_external_task_balancer_rest_enabled is False, so the
behavior without overrides does *not* expose unprotected REST endpoints.
3. Old task items hang around forever. Could implement garbage collection cron
to remove them past a TTL.
4. The REST api is missing ability to mark a single task for deletion and to
fetch a paginated list of results (without their payloads) for a given
user_id. Open issue: we do not expose the notion of a project in the REST
API, but we have it in the workers. Should we expose it to allow filtering at
the API level?
5. Add support for one balancer handling multiple pools of workers, not just
one.
6. Manager.mark* methods don't all check that the requested status transition is
valid. This means buggy handlers/workers/clients could cause invalid status
transitions. Fix is to have the Manager throw TransitionError in those cases
and modify the handlers to 400/500.
TODO(johncox): add URL of sample worker implementation once it's finished.
"""
__author__ = [
'johncox@google.com (John Cox)',
]
import logging
import urllib
from controllers import utils
from models import config
from models import custom_modules
from models import entities
from models import transforms
from google.appengine.api import urlfetch
from google.appengine.ext import db
# Headers sent to workers so responses are never served from a cache.
_DISABLE_CACHING_HEADERS = {
    'Cache-Control': 'max-age=0, must-revalidate',
    'Pragma': 'no-cache',
}
# Wire-format field names shared between FE and workers.
_PAYLOAD = 'payload'
_TICKET = 'ticket'
_PROJECT_NAME = 'project'
# REST routes exposed by this module.
_REST_URL_BASE = '/rest/balancer/v1'
_REST_URL_PROJECT = _REST_URL_BASE + '/project'
_REST_URL_TASK = _REST_URL_BASE
_STATUS = 'status'
_USER_ID = 'user_id'
# Timeout for a single urlfetch call to a worker.
_WORKER_DEADLINE_SECONDS = 5
_WORKER_ID = 'worker_id'
# Sentinel payload a worker returns when it cannot accept new tasks.
_WORKER_LOCKED = 'Worker locked'
_WORKER_LOCKED_MAX_RETRIES = 3
_LOG = logging.getLogger('modules.balancer.balancer')
logging.basicConfig()
# Admin-settable switches; REST endpoints 404 unless both are configured.
EXTERNAL_TASK_BALANCER_REST_ENABLED = config.ConfigProperty(
    'gcb_external_task_balancer_rest_enabled', bool,
    ('Whether or not to enable the REST endpoints for the external task '
     'balancer module. You must also set the external task balancer URL '
     'to use this feature.'), default_value=False,
    label='Enable task balancer REST endpoints')
EXTERNAL_TASK_BALANCER_WORKER_URL = config.ConfigProperty(
    'gcb_external_task_balancer_worker_url', str,
    'URL for the worker pool used by the external task balancer module.',
    default_value='', label='External task balancer worker URL')
class Error(Exception):
    """Base error class for the external task balancer module."""


class NotFoundError(Error):
    """Raised when an op that needs an entity is run with a missing entity."""


class TransitionError(Error):
    """Raised when an op attempts an invalid transition on a task.

    Both subclasses now derive from Error (they previously subclassed
    Exception directly, so `except Error` never caught them, defeating the
    module's own exception hierarchy). Existing handlers that catch the
    concrete types are unaffected.
    """
def _from_json(json_str):
    """Turns json -> object (or None if json cannot be parsed)."""
    try:
        parsed = transforms.loads(json_str)
    except:  # Deliberately catching everything. pylint: disable=bare-except
        return None
    return parsed
class Manager(object):
    """DAO for external tasks: all datastore access goes through here."""
    # Treating access as module-protected. pylint: disable=protected-access
    @classmethod
    def create(cls, user_id=None):
        """Creates task and returns ticket string."""
        task = _ExternalTask(status=_ExternalTask.CREATED, user_id=user_id)
        return _ExternalTask.get_ticket_by_key(db.put(task))
    @classmethod
    def get(cls, ticket):
        """Gets task for ticket (or None if no matching task)."""
        external_task = db.get(_ExternalTask.get_key_by_ticket(ticket))
        if not external_task:
            return None
        return Task._from_external_task(external_task)
    @classmethod
    def list(cls, user_id):
        """Returns list of Task matching user_id, ordered by create date."""
        # NOTE(review): capped at 1000 results by fetch(); users with more
        # tasks silently lose the excess. Method name shadows builtin list.
        return [Task._from_external_task(et) for et in sorted(
            _ExternalTask.all().filter(
                '%s =' % _ExternalTask.user_id.name, user_id
            ).fetch(1000), key=lambda task: task.create_date)]
    @classmethod
    @db.transactional
    def mark_deleted(cls, ticket):
        """Transitions the task to DELETED; raises NotFoundError if absent."""
        task = cls._get_or_raise_not_found_error(ticket)
        task.status = _ExternalTask.DELETED
        db.put(task)
    @classmethod
    @db.transactional
    def mark_done(cls, ticket, status, result):
        """Stores the worker's result and terminal status for the task.

        Raises TransitionError for a non-terminal status and NotFoundError
        when no task exists for the ticket.
        """
        if status not in _ExternalTask._TERMINAL_STATUSES:
            raise TransitionError(
                'mark_done called with non-terminal status ' + status)
        task = cls._get_or_raise_not_found_error(ticket)
        task.result = result
        task.status = status
        db.put(task)
    @classmethod
    @db.transactional
    def mark_failed(cls, ticket):
        """Transitions the task to FAILED; raises NotFoundError if absent."""
        task = cls._get_or_raise_not_found_error(ticket)
        task.status = _ExternalTask.FAILED
        db.put(task)
    @classmethod
    @db.transactional
    def mark_running(cls, ticket, worker_id):
        """Records the worker handling the task and sets status RUNNING."""
        task = cls._get_or_raise_not_found_error(ticket)
        task.status = _ExternalTask.RUNNING
        task.worker_id = worker_id
        db.put(task)
    @classmethod
    def _delete(cls, ticket):
        """Physically deletes the task entity (no existence check)."""
        key = _ExternalTask.get_key_by_ticket(ticket)
        db.delete(key)
    @classmethod
    def _get_or_raise_not_found_error(cls, ticket):
        """Fetches the entity for a ticket or raises NotFoundError."""
        key = _ExternalTask.get_key_by_ticket(ticket)
        task = db.get(key)
        if not task:
            raise NotFoundError
        return task
class Task(object):
    """DTO for external tasks: an immutable snapshot of an _ExternalTask."""

    def __init__(
            self, change_date, create_date, result, status, ticket, user_id,
            worker_id):
        self.change_date = change_date
        self.create_date = create_date
        self.result = result
        self.status = status
        self.ticket = ticket
        self.user_id = user_id
        self.worker_id = worker_id

    @classmethod
    def _from_external_task(cls, external_task):
        """Builds a DTO from a datastore _ExternalTask entity."""
        return cls(
            external_task.change_date, external_task.create_date,
            external_task.result, external_task.status,
            external_task.get_ticket(), external_task.user_id,
            external_task.worker_id)

    def is_done(self):
        """True iff the task's status is terminal (complete/deleted/failed)."""
        return _ExternalTask.is_status_terminal(self.status)

    def for_json(self):
        """Returns a JSON-serializable dict with ISO-8601 date strings."""
        return {
            'change_date': self.change_date.strftime(
                transforms.ISO_8601_DATETIME_FORMAT),
            'create_date': self.create_date.strftime(
                transforms.ISO_8601_DATETIME_FORMAT),
            'result': self.result,
            'status': self.status,
            'ticket': self.ticket,
            'user_id': self.user_id,
            'worker_id': self.worker_id,
        }

    def __eq__(self, other):
        return (
            isinstance(other, Task) and
            self.change_date == other.change_date and
            self.create_date == other.create_date and
            self.result == other.result and
            self.status == other.status and
            self.ticket == other.ticket and
            self.user_id == other.user_id and
            self.worker_id == other.worker_id)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        # Bug fix: the original interpolated against self.to_dict(), a method
        # this class never defined, so str(task) raised AttributeError.
        # for_json() supplies every key the format string references.
        return (
            'Task - change_date: %(change_date)s, '
            'create_date: %(create_date)s, result: %(result)s, '
            'status: %(status)s, ticket: %(ticket)s, user_id: %(user_id)s, '
            'worker_id: %(worker_id)s' % self.for_json())
class _ExternalTask(entities.BaseEntity):
    """Storage for external tasks."""
    # States a task may be in.
    COMPLETE = 'complete' # Done running and in known success state.
    CREATED = 'created' # Datastore entity created, but task not yet running.
    DELETED = 'deleted' # Marked for deletion; could be deleted later.
    FAILED = 'failed' # Done running and in known failure state.
    RUNNING = 'running' # Currently running on a worker.
    # Statuses that may still transition (non-terminal).
    _PENDING_STATUSES = frozenset([
        CREATED,
        RUNNING,
    ])
    # Statuses from which no further transition happens.
    _TERMINAL_STATUSES = frozenset([
        COMPLETE,
        DELETED,
        FAILED,
    ])
    STATUSES = _PENDING_STATUSES.union(_TERMINAL_STATUSES)
    # When the task was last edited.
    change_date = db.DateTimeProperty(required=True, auto_now=True)
    # When the task was created.
    create_date = db.DateTimeProperty(required=True, auto_now_add=True)
    # Output of the task in JSON.
    result = db.TextProperty()
    # Last observed status of the task. Can be inaccurate: for example, if a
    # user creates a new task but navigates away before the task completes and
    # their client never fetches the task when it's done, we'll still show it
    # running.
    status = db.StringProperty(required=True, choices=STATUSES)
    # Optional identifier for the user who owns the task. We impose no
    # restrictions beyond the identifier being a string <= 500B, per datastore.
    user_id = db.StringProperty()
    # Identifier for the worker.
    worker_id = db.StringProperty()
    @classmethod
    def get_key_by_ticket(cls, ticket_str):
        """Decodes a ticket string into a db.Key; raises ValueError if bad."""
        try:
            return db.Key(encoded=ticket_str)
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
        # and discards the original error; `except Exception` would be safer.
        except:
            raise ValueError(
                'Cannot make _ExternalTask key from ticket value: %s' % (
                    ticket_str))
    @classmethod
    def get_ticket_by_key(cls, key):
        """Encodes a db.Key as an opaque ticket string."""
        return str(key)
    @classmethod
    def is_status_terminal(cls, status):
        """True iff status is one of the terminal statuses."""
        return status in cls._TERMINAL_STATUSES
    def get_ticket(self):
        """Returns string identifier for the task; raises NotSavedError."""
        return self.get_ticket_by_key(self.key())
class _Operation(object):
    """Base class for wire operation payloads."""
    @classmethod
    def from_str(cls, raw_str):
        """Parses a raw JSON request string into an operation instance."""
        return cls._from_json(transforms.loads(raw_str))
    @classmethod
    def _from_json(cls, parsed):
        # Parse and validate raw input, raising ValueError if necessary.
        raise NotImplementedError
    def ready(self):
        """True iff the operation has all data it needs to be issued."""
        raise NotImplementedError
    def to_json(self):
        """Serializes the operation to a JSON string."""
        return transforms.dumps(self._to_dict())
    def to_url(self):
        """URL-quotes the JSON form for use as a GET query parameter."""
        return urllib.quote_plus(self.to_json())
    def update(self, updates_dict):
        """Sets existing attributes from a dict; rejects unknown names."""
        for k, v in updates_dict.iteritems():
            if not hasattr(self, k):
                raise ValueError('Cannot set name ' + k)
            setattr(self, k, v)
    def _to_dict(self):
        # Subclasses return the wire-format dict representation.
        raise NotImplementedError
class _CreateTaskOperation(_Operation):
    """Wire payload for creating a new task on a worker."""

    def __init__(self, payload, ticket, user_id):
        self.payload = payload
        self.ticket = ticket
        self.user_id = user_id

    @classmethod
    def _from_json(cls, parsed):
        # The ticket is allocated later by the frontend, so it starts unset.
        user_id = parsed.get(_USER_ID)
        return cls(parsed, None, user_id)

    def ready(self):
        """Dispatchable once both the payload and a ticket exist."""
        return not (self.payload is None or self.ticket is None)

    def _to_dict(self):
        return {
            _TICKET: self.ticket,
            _USER_ID: self.user_id,
            _PAYLOAD: self.payload,
        }
class _GetProjectOperation(_Operation):
    """Wire payload for fetching project metadata from the worker pool."""

    def __init__(self, payload):
        self.payload = payload

    @classmethod
    def _from_json(cls, parsed):
        # No required fields beyond the parsed body itself.
        return cls(parsed)

    def ready(self):
        """Dispatchable whenever a payload was parsed."""
        return not (self.payload is None)

    def _to_dict(self):
        return {_PAYLOAD: self.payload}
class _GetTaskOperation(_Operation):
    """Wire payload for polling a specific worker about one task."""

    def __init__(self, payload, ticket, worker_id):
        self.payload = payload
        self.ticket = ticket
        self.worker_id = worker_id

    @classmethod
    def _from_json(cls, parsed):
        # A ticket is mandatory for task lookups; the worker_id may be filled
        # in later from the stored task.
        if not parsed.get(_TICKET):
            raise ValueError('%s not set' % _TICKET)
        return cls(parsed, parsed.get(_TICKET), parsed.get(_WORKER_ID))

    def ready(self):
        """Dispatchable once payload, ticket, and worker_id are all known."""
        required = (self.payload, self.ticket, self.worker_id)
        return all(value is not None for value in required)

    def _to_dict(self):
        return {
            _PAYLOAD: self.payload,
            _TICKET: self.ticket,
            _WORKER_ID: self.worker_id,
        }
class _WorkerPool(object):
    """Interface for the pool of machines that do background work."""

    @classmethod
    def _check_response(cls, response):
        """True iff the parsed worker response carries a payload field."""
        # dict.has_key() is deprecated (and removed in Python 3); `in` is the
        # equivalent, portable membership test.
        return _PAYLOAD in response

    @classmethod
    def _do_fetch(cls, url, method, operation):
        """Issues one HTTP call to the pool; returns (status_code, dict)."""
        try:
            response = urlfetch.fetch(
                cls._get_url(url, method, operation),
                deadline=_WORKER_DEADLINE_SECONDS,
                headers=_DISABLE_CACHING_HEADERS, method=method,
                payload=cls._get_request_body(method, operation))
            return (
                response.status_code, cls._transform_response(response))
        except urlfetch.DownloadError as e: # 4xx, 5xx, timeouts.
            _LOG.error('Unable to dispatch request to pool; error: %s', e)
            return 500, {_PAYLOAD: 'Unable to dispatch request'}

    @classmethod
    def _get_base_url(cls, worker_id=None):
        """Pool-level URL, or a specific worker's URL when worker_id given."""
        base = (
            worker_id if worker_id is not None else
            EXTERNAL_TASK_BALANCER_WORKER_URL.value)
        return base + '/rest/v1'

    @classmethod
    def _get_create_task_url(cls):
        return cls._get_base_url()

    @classmethod
    def _get_get_project_url(cls):
        return cls._get_base_url() + '/project'

    @classmethod
    def _get_get_task_url(cls, worker_id):
        # Polls the specific worker that owns the task, not the pool.
        return cls._get_base_url(worker_id=worker_id)

    @classmethod
    def _get_request_body(cls, method, operation):
        """GETs carry the operation in the URL, so the body is None."""
        if method == 'GET':
            return None
        return operation.to_json()

    @classmethod
    def _get_url(cls, url, method, operation):
        """Appends the operation as a query parameter for GET requests."""
        if method == 'GET':
            return '%s?request=%s' % (url, operation.to_url())
        return url

    @classmethod
    def _transform_response(cls, response):
        """Transforms worker success/error responses into a standard format."""
        try:
            parsed = transforms.loads(response.content)
            if not cls._check_response(parsed):
                raise ValueError
            return {_PAYLOAD: parsed[_PAYLOAD]}
        except: # Catch everything on purpose. pylint: disable=bare-except
            _LOG.error(
                'Unable to parse worker response: ' + response.content)
            return {_PAYLOAD: 'Received invalid response'}

    @classmethod
    def create_task(cls, operation):
        """POSTs a create-task request to the pool."""
        return cls._do_fetch(cls._get_create_task_url(), 'POST', operation)

    @classmethod
    def get_project(cls, operation):
        """GETs project metadata from the pool."""
        return cls._do_fetch(cls._get_get_project_url(), 'GET', operation)

    @classmethod
    def get_task(cls, operation):
        """GETs task state from the worker named in the operation."""
        return cls._do_fetch(
            cls._get_get_task_url(operation.worker_id), 'GET', operation)
class _BaseRestHandler(utils.BaseRESTHandler):
    """Shared plumbing for this module's REST handlers."""
    def _send_json_response(self, code, response):
        """Writes response as JSON with hardening/caching headers set."""
        self.response.headers['Content-Disposition'] = 'attachment'
        self.response.headers['Content-Type'] = (
            'application/javascript; charset=utf-8')
        self.response.headers['X-Content-Type-Options'] = 'nosniff'
        self.response.headers['Access-Control-Allow-Origin'] = '*'
        self.response.status_code = code
        self.response.write(transforms.dumps(response))
    def _check_config_or_send_error(self):
        """Returns True iff the module is enabled and a pool URL is set.

        On failure, an error response has already been written: 404 when the
        REST flag is off, 500 when no worker pool URL is configured.
        """
        if not EXTERNAL_TASK_BALANCER_REST_ENABLED.value:
            self._send_json_response(404, 'Not found.')
            return False
        elif not EXTERNAL_TASK_BALANCER_WORKER_URL.value:
            self._send_json_response(500, 'No worker pool found.')
            return False
        return True
class _ProjectRestHandler(_BaseRestHandler):
    """REST handler that proxies project metadata requests to the pool."""

    def get(self):
        """Parses the request, forwards it to the pool, relays the result."""
        if not self._check_config_or_send_error():
            return

        raw_request = self.request.get('request')
        try:
            op = _GetProjectOperation.from_str(raw_request)
        except ValueError:
            self._send_json_response(400, 'Bad request')
            return

        code, response = _WorkerPool.get_project(op)
        self._send_json_response(code, response)
class _TaskRestHandler(_BaseRestHandler):
    """REST handler for creating tasks and polling their status."""
    def _get_payload(self, response):
        """Extracts the payload field from a pool response dict."""
        return response.get(_PAYLOAD)
    def _get_status(self, response):
        # Assumes the payload is a dict; a non-dict payload would raise here.
        return self._get_payload(response).get(_STATUS)
    def _get_task_payload(self, response):
        """Extracts the nested task payload (payload within payload)."""
        return response.get(_PAYLOAD).get(_PAYLOAD)
    def _get_ticket(self, response):
        return self._get_payload(response).get(_TICKET)
    def _get_worker_id(self, response):
        return self._get_payload(response).get(_WORKER_ID)
    def _retry_create_task(self, response, op):
        """Re-issues create_task up to the retry cap while workers are locked.

        NOTE(review): if _WORKER_LOCKED_MAX_RETRIES were ever set to 0 the
        final return would reference `code` before assignment; safe at the
        current value of 3.
        """
        tries = 0
        while tries < _WORKER_LOCKED_MAX_RETRIES:
            tries += 1
            _LOG.info('Worker locked; retrying (tries: %s)', tries)
            code, response = _WorkerPool.create_task(op)
            if not self._worker_locked(response):
                return code, response
        return code, {_PAYLOAD: _WORKER_LOCKED}
    def _worker_locked(self, response):
        """True iff the response is the worker pool's locked sentinel."""
        return response.get(_PAYLOAD) == _WORKER_LOCKED
    def get(self):
        """Returns task state, polling the owning worker for pending tasks."""
        configured = self._check_config_or_send_error()
        if not configured:
            return
        try:
            op = _GetTaskOperation.from_str(self.request.get('request'))
        except: # pylint: disable=bare-except
            self._send_json_response(400, 'Bad request')
            return
        task = None
        try:
            task = Manager.get(op.ticket)
        except ValueError:
            pass # Invalid ticket; handle as 404.
        if not task:
            self._send_json_response(
                404, 'Task not found for ticket %s' % op.ticket)
            return
        if task.is_done():
            # Terminal tasks are served straight from the datastore copy.
            self._send_json_response(200, task.for_json())
            return
        op.update({_WORKER_ID: task.worker_id})
        if not op.ready():
            # If the operation cannot be issued now, the most likely cause is
            # that a past response from a worker contained insufficient data to
            # dispatch requests to that worker (for example, it might not have)
            # set the worker_id). We cannot recover; all we can do is signal
            # likely programmer error.
            self._send_json_response(
                500, 'Unable to compose request for worker')
            return
        code, response = _WorkerPool.get_task(op)
        if code != 200:
            self._send_json_response(code, response)
            return
        status = self._get_status(response)
        if status is None:
            self._send_json_response(500, 'Worker sent partial response')
            return
        elif _ExternalTask.is_status_terminal(status):
            try:
                payload = self._get_task_payload(response)
                Manager.mark_done(op.ticket, status, payload)
            except: # Catch everything. pylint: disable=bare-except
                # TODO(johncox): could differentiate here and transition to a
                # failed state when the payload is too big so we don't force
                # unnecessary refetches against workers.
                self._send_json_response(
                    500, 'Invalid worker status or payload too big')
                return
        # NOTE(review): this issues a second get_task fetch to the worker for
        # the response body, discarding the result already held in `response`;
        # the worker may answer differently the second time.
        self._send_json_response(*_WorkerPool.get_task(op))
    def post(self):
        """Creates a task: allocates a ticket, then dispatches to the pool."""
        configured = self._check_config_or_send_error()
        if not configured:
            return
        try:
            op = _CreateTaskOperation.from_str(self.request.get('request'))
        except: # pylint: disable=bare-except
            self._send_json_response(400, 'Bad request')
            return
        # Must allocate ticket at storage level for wire ops against worker, so
        # we cannot create the task in one datastore call.
        ticket = Manager.create(user_id=op.user_id)
        op.update({_TICKET: ticket})
        if not op.ready():
            self._send_json_response(
                500, 'Unable to compose request for worker')
            return
        code, response = _WorkerPool.create_task(op)
        if self._worker_locked(response):
            code, response = self._retry_create_task(response, op)
            if code != 200:
                Manager.mark_failed(ticket)
                self._send_json_response(500, self._get_payload(response))
                return
        request_failed = code != 200
        ticket_mismatch = self._get_ticket(response) != ticket
        if request_failed or ticket_mismatch:
            response = 'Ticket mismatch' if ticket_mismatch else 'Worker failed'
            Manager.mark_failed(ticket)
            self._send_json_response(500, response)
        else: # Worker response indicates success.
            Manager.mark_running(ticket, self._get_worker_id(response))
            self._send_json_response(code, response)
custom_module = None


def register_module():
    """Registers the External Task Balancer with the custom-modules system.

    Stores the Module instance in the module-level custom_module slot and
    returns it.
    """
    global custom_module  # pylint: disable=global-statement

    custom_module = custom_modules.Module(
        'External Task Balancer', 'External Task Balancer',
        [
            (_REST_URL_TASK, _TaskRestHandler),
            (_REST_URL_PROJECT, _ProjectRestHandler),
        ],
        [])
    return custom_module
| modules/balancer/balancer.py | 23,311 | Base error class.
DAO for external tasks.
Raised when an op that needs an entity is run with a missing entity.
DTO for external tasks.
Raised when an op attempts an invalid transition on a task.
Storage for external tasks.
Base class for wire operation payloads.
Interface for the pool of machines that do background work.
Turns json -> object (or None if json cannot be parsed).
Transforms worker success/error responses into a standard format.
Creates task and returns ticket string.
Gets task for ticket (or None if no matching task).
Returns string identifier for the task; raises NotSavedError.
Returns list of Task matching user_id, ordered by create date.
True iff the operation has all data it needs to be issued.
External task balancer.
Overall architecture is:
1. Users interact with clients.
2. Clients make requests against the frontend's REST API.
3. The FE makes a REST call against a worker or worker pool identified by
gcb_external_task_balancer_worker_url. The FE provisions a unique token,
generates a Task instance, and dispatches a REST request to the worker or
worker pool.
4. The worker or worker pool exposes a REST API for use by the FE. Worker
responses contain the name of the worker so the FE can poll a specific worker
for results using the (ticket, name) combination. Workers are in charge both
of doing work and of cleaning up their results. Clients do not talk to
workers directly.
To enable, set up a pool of workers behind a single URL. For example, this might
be a set of machines behind a balancer on GCE or an AWS ELB. Next, set
gcb_external_task_balancer_rest_enabled to True and set
gcb_external_task_balancer_worker_url to the URL of your worker pool. Secure
communication if desired, and write a client against the REST API this module
exposes.
This implementation has the following big limitations:
1. It is insecure. Currently there is no token exchange/validation at the API
level, so anyone who gets a ticket (for example, by listening to HTTP
traffic between clients and the FE) can issue API calls.
2. There is no XSSI/XSRF protection. Note that exposed endpoints will 404 by
default because gcb_external_task_balancer_rest_enabled is False, so the
behavior without overrides does *not* expose unprotected REST endpoints.
3. Old task items hang around forever. Could implement garbage collection cron
to remove them past a TTL.
4. The REST api is missing ability to mark a single task for deletion and to
fetch a paginated list of results (without their payloads) for a given
user_id. Open issue: we do not expose the notion of a project in the REST
API, but we have it in the workers. Should we expose it to allow filtering at
the API level?
5. Add support for one balancer handling multiple pools of workers, not just
one.
6. Manager.mark* methods don't all check that the requested status transition is
valid. This means buggy handlers/workers/clients could cause invalid status
transitions. Fix is to have the Manager throw TransitionError in those cases
and modify the handlers to 400/500.
TODO(johncox): add URL of sample worker implementation once it's finished.
Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS-IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Deliberately catching everything. pylint: disable=bare-except Treating access as module-protected. pylint: disable=protected-access States a task may be in. Done running and in known success state. Datastore entity created, but task not yet running. Marked for deletion; could be deleted later. Done running and in known failure state. Currently running on a worker. When the task was last edited. When the task was created. Output of the task in JSON. Last observed status of the task. Can be inaccurate: for example, if a user creates a new task but navigates away before the task completes and their client never fetches the task when it's done, we'll still show it running. Optional identifier for the user who owns the task. We impose no restrictions beyond the identifier being a string <= 500B, per datastore. Identifier for the worker. Parse and validate raw input, raising ValueError if necessary. 4xx, 5xx, timeouts. Catch everything on purpose. pylint: disable=bare-except pylint: disable=bare-except Invalid ticket; handle as 404. If the operation cannot be issued now, the most likely cause is that a past response from a worker contained insufficient data to dispatch requests to that worker (for example, it might not have) set the worker_id). We cannot recover; all we can do is signal likely programmer error. Catch everything. 
pylint: disable=bare-except TODO(johncox): could differentiate here and transition to a failed state when the payload is too big so we don't force unnecessary refetches against workers. pylint: disable=bare-except Must allocate ticket at storage level for wire ops against worker, so we cannot create the task in one datastore call. Worker response indicates success. pylint: disable=global-statement | 5,500 | en | 0.885711 |
"""
CLI tests
"""
from tso.tsocli import __main__ as tsocli
import pytest
from unittest.mock import patch, MagicMock, mock_open
# Minimal JSON config string returned by the patched configuration parser.
# (Fixed typo: was `mock_configurqation`; defined and referenced only in this file.)
mock_configuration = "{}"


class TestCli:
    """Tests for the `tsocli` command-line entry point (`tsocli.main`)."""

    def test_cli_should_exit_with_no_args(self):
        """With no arguments the CLI must exit via SystemExit with code 1."""
        with pytest.raises(SystemExit) as pytest_wrapped_e:
            tsocli.main([])
        assert pytest_wrapped_e.type == SystemExit
        assert pytest_wrapped_e.value.code == 1

    def test_cli_should_exit_with_only_one_arg(self):
        """A lone sub-command (short or long form) must exit identically."""
        with pytest.raises(SystemExit) as pytest_wrapped_e_pseudo_name:
            tsocli.main(['s'])
        with pytest.raises(SystemExit) as pytest_wrapped_e_full_name:
            tsocli.main(['schedule'])

        # Both exceptions should be the same
        assert pytest_wrapped_e_pseudo_name.type == pytest_wrapped_e_full_name.type
        assert pytest_wrapped_e_pseudo_name.value.code == pytest_wrapped_e_full_name.value.code

        # The exceptions should be a System Exit
        assert pytest_wrapped_e_pseudo_name.type == SystemExit
        assert pytest_wrapped_e_pseudo_name.value.code == 1

    @patch('configuration.configuration_parser.parse', return_value=mock_configuration)
    @patch('tso.tsocli.command.cli_pipeline')
    def test_cli_should_call_pipeline_when_successful(self, mock_pipeline, mock_config_parser):
        """A fully-specified `schedule` invocation must reach the pipeline."""
        tsocli.main([
            'schedule',
            '--start-date-time',
            '2019-03-01 19:00',
            '--end-date-time',
            '2019-03-12 19:00',
            '--export-to-file',
            '--export-to-browser'
        ])

        assert mock_pipeline.called

    @patch('configuration.configuration_parser.parse', return_value=mock_configuration)
    @patch('tso.tsocli.command.cli_pipeline')
    def test_cli_should_have_default_date_time_values(self, mock_pipeline, mock_config_parser):
        """Omitting the date-time flags must still pass populated defaults on.

        NOTE(review): `mock_pipeline.call_args.start_date_time` relies on
        attribute access on the call-args object — confirm this actually
        inspects the parsed-arguments namespace rather than the Call tuple.
        """
        tsocli.main([
            'schedule',
            '--export-to-file'
        ])

        assert mock_pipeline.call_args.start_date_time
        assert mock_pipeline.call_args.end_date_time
| src/tso/tsocli/tests/test_cli.py | 2,032 | CLI tests
Both Exceptions should be the same The exceptions should be a System Exit | 85 | en | 0.83666 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import unittest
from unittest import mock
from airflow.models.dag import DAG
from airflow.providers.microsoft.azure.transfers.local_to_wasb import LocalFilesystemToWasbOperator
class TestLocalFilesystemToWasbOperator(unittest.TestCase):
    """Unit tests for LocalFilesystemToWasbOperator (local file -> Azure WASB)."""

    # Baseline keyword arguments shared by every operator built in these tests.
    _config = {
        'file_path': 'file',
        'container_name': 'container',
        'blob_name': 'blob',
        'wasb_conn_id': 'wasb_default',
        'retries': 3,
    }

    def setUp(self):
        """Create a minimal DAG for the operators to attach to."""
        default_args = {'owner': 'airflow', 'start_date': datetime.datetime(2017, 1, 1)}
        self.dag = DAG('test_dag_id', default_args=default_args)

    def test_init(self):
        """The constructor should carry every configured attribute through."""
        op = LocalFilesystemToWasbOperator(task_id='wasb_operator_1', dag=self.dag, **self._config)
        for attr in ('file_path', 'container_name', 'blob_name', 'wasb_conn_id', 'retries'):
            assert getattr(op, attr) == self._config[attr]
        # load_options defaults to an empty dict when not supplied…
        assert op.load_options == {}

        # …and is stored verbatim when it is.
        op = LocalFilesystemToWasbOperator(
            task_id='wasb_operator_2', dag=self.dag, load_options={'timeout': 2}, **self._config
        )
        assert op.load_options == {'timeout': 2}

    @mock.patch('airflow.providers.microsoft.azure.transfers.local_to_wasb.WasbHook', autospec=True)
    def test_execute(self, mock_hook):
        """execute() should upload the file once via WasbHook.load_file."""
        hook_instance = mock_hook.return_value
        op = LocalFilesystemToWasbOperator(
            task_id='wasb_sensor', dag=self.dag, load_options={'timeout': 2}, **self._config
        )
        op.execute(None)
        # load_options are forwarded as keyword arguments to the hook call.
        hook_instance.load_file.assert_called_once_with('file', 'container', 'blob', timeout=2)
| tests/providers/microsoft/azure/transfers/test_local_to_wasb.py | 2,571 | Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 752 | en | 0.883564 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.